This article collects typical usage examples of the Java class org.apache.parquet.column.page.PageReadStore. If you have been wondering what PageReadStore is for, or how to use it in practice, the curated class examples below may help.
The PageReadStore class belongs to the org.apache.parquet.column.page package. Twenty code examples are shown below, sorted by popularity by default. You can upvote the examples you find useful; your feedback helps the system recommend better Java code samples.
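Before the numbered examples, here is a minimal, self-contained sketch of the canonical read loop that most of them share: ParquetFileReader hands back one PageReadStore per row group, and a MessageColumnIO/RecordReader pair assembles records from its pages. This is only a sketch modeled on the examples below; the class name PageReadStoreSketch and the command-line file path are assumptions for illustration.

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.parquet.column.page.PageReadStore;
import org.apache.parquet.example.data.Group;
import org.apache.parquet.example.data.simple.convert.GroupRecordConverter;
import org.apache.parquet.format.converter.ParquetMetadataConverter;
import org.apache.parquet.hadoop.ParquetFileReader;
import org.apache.parquet.hadoop.metadata.ParquetMetadata;
import org.apache.parquet.io.ColumnIOFactory;
import org.apache.parquet.io.MessageColumnIO;
import org.apache.parquet.io.RecordReader;
import org.apache.parquet.schema.MessageType;

public class PageReadStoreSketch {
  public static void main(String[] args) throws IOException {
    Path path = new Path(args[0]); // hypothetical: path to an existing Parquet file
    Configuration conf = new Configuration();
    // read the footer first to obtain the schema and the row-group metadata
    ParquetMetadata footer = ParquetFileReader.readFooter(conf, path, ParquetMetadataConverter.NO_FILTER);
    MessageType schema = footer.getFileMetaData().getSchema();
    ParquetFileReader reader = new ParquetFileReader(conf, footer.getFileMetaData(), path,
        footer.getBlocks(), schema.getColumns());
    MessageColumnIO columnIO = new ColumnIOFactory().getColumnIO(schema);
    PageReadStore rowGroup; // one PageReadStore per row group
    while ((rowGroup = reader.readNextRowGroup()) != null) {
      RecordReader<Group> recordReader = columnIO.getRecordReader(rowGroup, new GroupRecordConverter(schema));
      for (long i = 0; i < rowGroup.getRowCount(); i++) {
        Group record = recordReader.read(); // assemble one record from the column pages
        System.out.println(record);
      }
    }
    reader.close();
  }
}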
Example 1: test

import org.apache.parquet.column.page.PageReadStore; // import the required package/class

@Override
public void test() throws IOException {
  Configuration configuration = new Configuration();
  ParquetMetadata metadata = ParquetFileReader.readFooter(configuration,
      super.fsPath, ParquetMetadataConverter.NO_FILTER);
  ParquetFileReader reader = new ParquetFileReader(configuration,
      metadata.getFileMetaData(),
      super.fsPath,
      metadata.getBlocks(),
      metadata.getFileMetaData().getSchema().getColumns());
  PageStatsValidator validator = new PageStatsValidator();
  PageReadStore pageReadStore;
  while ((pageReadStore = reader.readNextRowGroup()) != null) {
    validator.validate(metadata.getFileMetaData().getSchema(), pageReadStore);
  }
}

Developer: apache, Project: parquet-mr, Lines: 19, Source: TestStatistics.java
Example 2: read

import org.apache.parquet.column.page.PageReadStore; // import the required package/class

// NOTE: the original declared this as a JUnit @Test, but JUnit 4 test methods must be
// parameterless; it is kept here as a plain helper to be called from a no-arg test.
public void read(String fileName) throws IOException {
  Path path = new Path(fileName);
  Configuration conf = new Configuration();
  conf.set("fs.hdfs.impl", DistributedFileSystem.class.getName());
  ParquetMetadata metadata = ParquetFileReader.readFooter(conf, path, NO_FILTER);
  ParquetFileReader reader = new ParquetFileReader(conf, metadata.getFileMetaData(), path,
      metadata.getBlocks(), metadata.getFileMetaData().getSchema().getColumns());
  PageReadStore pageReadStore;
  PageReader pageReader;
  DataPage page;
  while ((pageReadStore = reader.readNextRowGroup()) != null) {
    for (ColumnDescriptor cd : metadata.getFileMetaData().getSchema().getColumns()) {
      pageReader = pageReadStore.getPageReader(cd);
      // reads only the first page of each column chunk; loop until null to drain all pages
      page = pageReader.readPage();
    }
  }
}

Developer: dbiir, Project: RealtimeAnalysis, Lines: 20, Source: ParquetFileReaderTest.java
Example 3: validatePages

import org.apache.parquet.column.page.PageReadStore; // import the required package/class

public static void validatePages(Path file, List<?> expectedValues) throws IOException {
  List<PageReadStore> blockReaders = readBlocksFromFile(file);
  MessageType fileSchema = readSchemaFromFile(file);
  int rowGroupID = 0;
  int rowsRead = 0;
  for (PageReadStore pageReadStore : blockReaders) {
    for (ColumnDescriptor columnsDesc : fileSchema.getColumns()) {
      List<DataPage> pageGroup = getPageGroupForColumn(pageReadStore, columnsDesc);
      DictionaryPage dictPage = reusableCopy(getDictionaryPageForColumn(pageReadStore, columnsDesc));
      List<?> expectedRowGroupValues = expectedValues.subList(rowsRead, (int) (rowsRead + pageReadStore.getRowCount()));
      validateFirstToLast(rowGroupID, dictPage, pageGroup, columnsDesc, expectedRowGroupValues);
      validateLastToFirst(rowGroupID, dictPage, pageGroup, columnsDesc, expectedRowGroupValues);
    }
    rowsRead += pageReadStore.getRowCount();
    rowGroupID++;
  }
}

Developer: apache, Project: parquet-mr, Lines: 20, Source: FileEncodingsIT.java
Example 4: read

import org.apache.parquet.column.page.PageReadStore; // import the required package/class

private static void read(PageReadStore columns, String pigSchemaString, String message) throws ParserException {
  System.out.println(message);
  MessageColumnIO columnIO = newColumnFactory(pigSchemaString);
  TupleReadSupport tupleReadSupport = new TupleReadSupport();
  Map<String, String> pigMetaData = pigMetaData(pigSchemaString);
  MessageType schema = new PigSchemaConverter().convert(Utils.getSchemaFromString(pigSchemaString));
  ReadContext init = tupleReadSupport.init(null, pigMetaData, schema);
  RecordMaterializer<Tuple> recordConsumer = tupleReadSupport.prepareForRead(null, pigMetaData, schema, init);
  RecordReader<Tuple> recordReader = columnIO.getRecordReader(columns, recordConsumer);
  // TODO: put this back
  // if (DEBUG) {
  //   recordConsumer = new RecordConsumerLoggingWrapper(recordConsumer);
  // }
  read(recordReader, 10000, pigSchemaString);
  read(recordReader, 10000, pigSchemaString);
  read(recordReader, 10000, pigSchemaString);
  read(recordReader, 10000, pigSchemaString);
  read(recordReader, 10000, pigSchemaString);
  read(recordReader, 100000, pigSchemaString);
  read(recordReader, 1000000, pigSchemaString);
  System.out.println();
}

Developer: apache, Project: parquet-mr, Lines: 23, Source: TupleConsumerPerfTest.java
Example 5: validateContains

import org.apache.parquet.column.page.PageReadStore; // import the required package/class

private void validateContains(MessageType schema, PageReadStore pages, String[] path, int values, BytesInput bytes)
    throws IOException {
  PageReader pageReader = pages.getPageReader(schema.getColumnDescription(path));
  DataPageV1 page = (DataPageV1) pageReader.readPage();
  assertEquals(values, page.getValueCount());
  assertArrayEquals(bytes.toByteArray(), page.getBytes().toByteArray());
}

Developer: dremio, Project: dremio-oss, Lines: 8, Source: ParquetRecordReaderTest.java
Example 6: checkRead

import org.apache.parquet.column.page.PageReadStore; // import the required package/class

private void checkRead() throws IOException {
  if (current == totalCountLoadedSoFar) {
    PageReadStore pages = reader.readNextRowGroup();
    if (pages == null) {
      throw new IOException("expecting more rows but reached last block. Read " + current + " out of " + total);
    }
    MessageColumnIO columnIO = columnIOFactory.getColumnIO(requestedSchema, fileSchema, strictTypeChecking);
    recordReader = columnIO.getRecordReader(pages, recordConverter, filter);
    totalCountLoadedSoFar += pages.getRowCount();
    ++currentBlock;
  }
}

Developer: CyberAgent, Project: embulk-input-parquet_hadoop, Lines: 15, Source: ParquetRowReader.java
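A checkRead() like this one is typically driven from a per-record iteration method. The fragment below is only a sketch under that assumption and is not taken from the original project: nextRecord and currentValue are hypothetical names, while current, total, and recordReader are the fields used in the snippet above.

// hypothetical driver loop around checkRead() (names nextRecord/currentValue are assumptions)
public boolean nextRecord() throws IOException {
  if (current >= total) {
    return false;                        // every row in the file has been consumed
  }
  checkRead();                           // loads the next row group into recordReader if needed
  currentValue = recordReader.read();    // materialize one record from the current PageReadStore
  current++;
  return true;
}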
Example 7: load

import org.apache.parquet.column.page.PageReadStore; // import the required package/class

public ITable load() {
  try {
    Configuration conf = new Configuration();
    System.setProperty("hadoop.home.dir", "/");
    conf.set("hadoop.security.authentication", "simple");
    conf.set("hadoop.security.authorization", "false");
    Path path = new Path(this.filename);
    ParquetMetadata md = ParquetFileReader.readFooter(conf, path,
        ParquetMetadataConverter.NO_FILTER);
    MessageType schema = md.getFileMetaData().getSchema();
    ParquetFileReader r = new ParquetFileReader(conf, path, md);
    IAppendableColumn[] cols = this.createColumns(md);
    MessageColumnIO columnIO = new ColumnIOFactory().getColumnIO(schema);
    PageReadStore pages;
    while (null != (pages = r.readNextRowGroup())) {
      final long rows = pages.getRowCount();
      RecordReader<Group> recordReader = columnIO.getRecordReader(
          pages, new GroupRecordConverter(schema));
      for (int i = 0; i < rows; i++) {
        Group g = recordReader.read();
        appendGroup(cols, g, md.getFileMetaData().getSchema().getColumns());
      }
    }
    for (IAppendableColumn c : cols)
      c.seal();
    return new Table(cols);
  } catch (IOException ex) {
    throw new RuntimeException(ex);
  }
}

Developer: vmware, Project: hillview, Lines: 33, Source: ParquetReader.java
Example 8: checkRead

import org.apache.parquet.column.page.PageReadStore; // import the required package/class

private void checkRead() throws IOException {
  if (current == totalCountLoadedSoFar) {
    if (current != 0) {
      totalTimeSpentProcessingRecords += (System.currentTimeMillis() - startedAssemblingCurrentBlockAt);
      if (Log.DEBUG) {
        LOG.debug("Assembled and processed " + totalCountLoadedSoFar + " records from " + columnCount + " columns in " + totalTimeSpentProcessingRecords + " ms: " + ((float) totalCountLoadedSoFar / totalTimeSpentProcessingRecords) + " rec/ms, " + ((float) totalCountLoadedSoFar * columnCount / totalTimeSpentProcessingRecords) + " cell/ms");
        final long totalTime = totalTimeSpentProcessingRecords + totalTimeSpentReadingBytes;
        if (totalTime != 0) {
          final long percentReading = 100 * totalTimeSpentReadingBytes / totalTime;
          final long percentProcessing = 100 * totalTimeSpentProcessingRecords / totalTime;
          LOG.debug("time spent so far " + percentReading + "% reading (" + totalTimeSpentReadingBytes + " ms) and " + percentProcessing + "% processing (" + totalTimeSpentProcessingRecords + " ms)");
        }
      }
    }
    if (Log.DEBUG) LOG.debug("at row " + current + ". reading next block");
    long t0 = System.currentTimeMillis();
    PageReadStore pages = reader.readNextRowGroup();
    if (pages == null) {
      throw new IOException("expecting more rows but reached last block. Read " + current + " out of " + total);
    }
    long timeSpentReading = System.currentTimeMillis() - t0;
    totalTimeSpentReadingBytes += timeSpentReading;
    BenchmarkCounter.incrementTime(timeSpentReading);
    if (Log.INFO) LOG.info("block read in memory in " + timeSpentReading + " ms. row count = " + pages.getRowCount());
    if (Log.DEBUG) LOG.debug("initializing Record assembly with requested schema " + requestedSchema);
    MessageColumnIO columnIO = columnIOFactory.getColumnIO(requestedSchema, fileSchema, strictTypeChecking);
    recordReader = columnIO.getRecordReader(pages, recordConverter, filter);
    startedAssemblingCurrentBlockAt = System.currentTimeMillis();
    totalCountLoadedSoFar += pages.getRowCount();
    ++currentBlock;
  }
}

Developer: apache, Project: tajo, Lines: 34, Source: InternalParquetRecordReader.java
Example 9: checkRead

import org.apache.parquet.column.page.PageReadStore; // import the required package/class

private void checkRead() throws IOException {
  if (current == totalCountLoadedSoFar) {
    if (current != 0) {
      totalTimeSpentProcessingRecords += (System.currentTimeMillis() - startedAssemblingCurrentBlockAt);
      if (Log.isLoggingFor("info")) {
        Log.info("Assembled and processed " + totalCountLoadedSoFar + " records from " + columnCount + " columns in " + totalTimeSpentProcessingRecords + " ms: " + ((float) totalCountLoadedSoFar / totalTimeSpentProcessingRecords) + " rec/ms, " + ((float) totalCountLoadedSoFar * columnCount / totalTimeSpentProcessingRecords) + " cell/ms");
        final long totalTime = totalTimeSpentProcessingRecords + totalTimeSpentReadingBytes;
        if (totalTime != 0) {
          final long percentReading = 100 * totalTimeSpentReadingBytes / totalTime;
          final long percentProcessing = 100 * totalTimeSpentProcessingRecords / totalTime;
          Log.info("time spent so far " + percentReading + "% reading (" + totalTimeSpentReadingBytes + " ms) and " + percentProcessing + "% processing (" + totalTimeSpentProcessingRecords + " ms)");
        }
      }
    }
    Log.info("at row " + current + ". reading next block");
    long t0 = System.currentTimeMillis();
    PageReadStore pages = reader.readNextRowGroup();
    if (pages == null) {
      throw new IOException("expecting more rows but reached last block. Read " + current + " out of " + total);
    }
    long timeSpentReading = System.currentTimeMillis() - t0;
    totalTimeSpentReadingBytes += timeSpentReading;
    if (Log.isLoggingFor("info")) Log.info("block read in memory in " + timeSpentReading + " ms. row count = " + pages.getRowCount());
    if (Log.isLoggingFor("debug")) Log.debug("initializing Record assembly with requested schema " + requestedSchema);
    MessageColumnIO columnIO = columnIOFactory.getColumnIO(requestedSchema, fileSchema, strictTypeChecking);
    recordReader = columnIO.getRecordReader(pages, recordConverter, filter);
    startedAssemblingCurrentBlockAt = System.currentTimeMillis();
    totalCountLoadedSoFar += pages.getRowCount();
    ++currentBlock;
  }
}

Developer: h2oai, Project: h2o-3, Lines: 33, Source: H2OInternalParquetReader.java
Example 10: getRecordReader

import org.apache.parquet.column.page.PageReadStore; // import the required package/class

/**
 * @deprecated use {@link #getRecordReader(PageReadStore, RecordMaterializer, Filter)}
 */
@Deprecated
public <T> RecordReader<T> getRecordReader(PageReadStore columns,
                                           RecordMaterializer<T> recordMaterializer,
                                           UnboundRecordFilter filter) {
  return getRecordReader(columns, recordMaterializer, FilterCompat.get(filter));
}

Developer: apache, Project: parquet-mr, Lines: 10, Source: MessageColumnIO.java
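Since the UnboundRecordFilter overload is deprecated, new callers are expected to wrap legacy filters with FilterCompat themselves, roughly as in the sketch below; pages, materializer, and legacyFilter are assumed variables standing in for a PageReadStore, a RecordMaterializer, and an UnboundRecordFilter.

// preferred call path: adapt the legacy filter explicitly (variable names are assumptions)
RecordReader<Group> recordReader = columnIO.getRecordReader(
    pages,                           // PageReadStore for the current row group
    materializer,                    // assumed RecordMaterializer<Group>
    FilterCompat.get(legacyFilter)); // wraps an UnboundRecordFilter as a FilterCompat.Filter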
Example 11: checkRead

import org.apache.parquet.column.page.PageReadStore; // import the required package/class

private void checkRead() throws IOException {
  if (current == totalCountLoadedSoFar) {
    if (current != 0) {
      totalTimeSpentProcessingRecords += (System.currentTimeMillis() - startedAssemblingCurrentBlockAt);
      if (LOG.isInfoEnabled()) {
        LOG.info("Assembled and processed " + totalCountLoadedSoFar + " records from " + columnCount + " columns in " + totalTimeSpentProcessingRecords + " ms: " + ((float) totalCountLoadedSoFar / totalTimeSpentProcessingRecords) + " rec/ms, " + ((float) totalCountLoadedSoFar * columnCount / totalTimeSpentProcessingRecords) + " cell/ms");
        final long totalTime = totalTimeSpentProcessingRecords + totalTimeSpentReadingBytes;
        if (totalTime != 0) {
          final long percentReading = 100 * totalTimeSpentReadingBytes / totalTime;
          final long percentProcessing = 100 * totalTimeSpentProcessingRecords / totalTime;
          LOG.info("time spent so far " + percentReading + "% reading (" + totalTimeSpentReadingBytes + " ms) and " + percentProcessing + "% processing (" + totalTimeSpentProcessingRecords + " ms)");
        }
      }
    }
    LOG.info("at row " + current + ". reading next block");
    long t0 = System.currentTimeMillis();
    PageReadStore pages = reader.readNextRowGroup();
    if (pages == null) {
      throw new IOException("expecting more rows but reached last block. Read " + current + " out of " + total);
    }
    long timeSpentReading = System.currentTimeMillis() - t0;
    totalTimeSpentReadingBytes += timeSpentReading;
    BenchmarkCounter.incrementTime(timeSpentReading);
    if (LOG.isInfoEnabled()) LOG.info("block read in memory in {} ms. row count = {}", timeSpentReading, pages.getRowCount());
    LOG.debug("initializing Record assembly with requested schema {}", requestedSchema);
    MessageColumnIO columnIO = columnIOFactory.getColumnIO(requestedSchema, fileSchema, strictTypeChecking);
    recordReader = columnIO.getRecordReader(pages, recordConverter,
        filterRecords ? filter : FilterCompat.NOOP);
    startedAssemblingCurrentBlockAt = System.currentTimeMillis();
    totalCountLoadedSoFar += pages.getRowCount();
    ++currentBlock;
  }
}

Developer: apache, Project: parquet-mr, Lines: 35, Source: InternalParquetRecordReader.java
Example 12: getPageGroupForColumn

import org.apache.parquet.column.page.PageReadStore; // import the required package/class

private static List<DataPage> getPageGroupForColumn(PageReadStore pageReadStore, ColumnDescriptor columnDescriptor) {
  PageReader pageReader = pageReadStore.getPageReader(columnDescriptor);
  List<DataPage> pageGroup = new ArrayList<DataPage>();
  DataPage page;
  while ((page = pageReader.readPage()) != null) {
    pageGroup.add(reusableCopy(page));
  }
  return pageGroup;
}

Developer: apache, Project: parquet-mr, Lines: 12, Source: FileEncodingsIT.java
Example 13: readBlocksFromFile

import org.apache.parquet.column.page.PageReadStore; // import the required package/class

private static List<PageReadStore> readBlocksFromFile(Path file) throws IOException {
  List<PageReadStore> rowGroups = new ArrayList<PageReadStore>();
  ParquetMetadata metadata = ParquetFileReader.readFooter(configuration, file, ParquetMetadataConverter.NO_FILTER);
  ParquetFileReader fileReader = new ParquetFileReader(configuration, metadata.getFileMetaData(), file, metadata.getBlocks(),
      metadata.getFileMetaData().getSchema().getColumns());
  PageReadStore group;
  while ((group = fileReader.readNextRowGroup()) != null) {
    rowGroups.add(group);
  }
  return rowGroups;
}

Developer: apache, Project: parquet-mr, Lines: 15, Source: FileEncodingsIT.java
Example 14: validate

import org.apache.parquet.column.page.PageReadStore; // import the required package/class

public void validate(MessageType schema, PageReadStore store) {
  for (ColumnDescriptor desc : schema.getColumns()) {
    PageReader reader = store.getPageReader(desc);
    DictionaryPage dict = reader.readDictionaryPage();
    DataPage page;
    while ((page = reader.readPage()) != null) {
      validateStatsForPage(page, dict, desc);
    }
  }
}

Developer: apache, Project: parquet-mr, Lines: 11, Source: TestStatistics.java
Example 15: check

import org.apache.parquet.column.page.PageReadStore; // import the required package/class

private String check(String file) throws IOException {
  Path path = qualifiedPath(file);
  ParquetMetadata footer = ParquetFileReader.readFooter(
      getConf(), path, ParquetMetadataConverter.NO_FILTER);
  FileMetaData meta = footer.getFileMetaData();
  String createdBy = meta.getCreatedBy();
  if (CorruptStatistics.shouldIgnoreStatistics(createdBy, BINARY)) {
    // create fake metadata that will read corrupt stats and return them
    FileMetaData fakeMeta = new FileMetaData(
        meta.getSchema(), meta.getKeyValueMetaData(), Version.FULL_VERSION);
    // get just the binary columns
    List<ColumnDescriptor> columns = Lists.newArrayList();
    Iterables.addAll(columns, Iterables.filter(
        meta.getSchema().getColumns(),
        new Predicate<ColumnDescriptor>() {
          @Override
          public boolean apply(@Nullable ColumnDescriptor input) {
            return input != null && input.getType() == BINARY;
          }
        }));
    // now check to see if the data is actually corrupt
    ParquetFileReader reader = new ParquetFileReader(getConf(),
        fakeMeta, path, footer.getBlocks(), columns);
    try {
      PageStatsValidator validator = new PageStatsValidator();
      for (PageReadStore pages = reader.readNextRowGroup(); pages != null;
           pages = reader.readNextRowGroup()) {
        validator.validate(columns, pages);
      }
    } catch (BadStatsException e) {
      return e.getMessage();
    }
  }
  return null;
}

Developer: apache, Project: parquet-mr, Lines: 41, Source: CheckParquet251Command.java
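The trick in this check is the rebuilt metadata: constructing fakeMeta with Version.FULL_VERSION makes the reader treat the file as if it were written by the current library, so CorruptStatistics does not silently discard the suspect binary statistics. The validator can then read those statistics page by page and surface genuinely corrupt ones as a BadStatsException.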
Example 16: getRecordReader

import org.apache.parquet.column.page.PageReadStore; // import the required package/class

private RecordReaderImplementation<Group> getRecordReader(MessageColumnIO columnIO, MessageType schema, PageReadStore pageReadStore) {
  RecordMaterializer<Group> recordConverter = new GroupRecordConverter(schema);
  return (RecordReaderImplementation<Group>) columnIO.getRecordReader(pageReadStore, recordConverter);
}

Developer: apache, Project: parquet-mr, Lines: 6, Source: TestColumnIO.java
Example 17: readNextRowGroup

import org.apache.parquet.column.page.PageReadStore; // import the required package/class

/**
 * Reads all the columns requested from the row group at the current file position.
 * @throws IOException if an error occurs while reading
 * @return the PageReadStore which can provide PageReaders for each column.
 */
public PageReadStore readNextRowGroup() throws IOException {
  if (currentBlock == blocks.size()) {
    return null;
  }
  BlockMetaData block = blocks.get(currentBlock);
  if (block.getRowCount() == 0) {
    throw new RuntimeException("Illegal row group of 0 rows");
  }
  this.currentRowGroup = new ColumnChunkPageReadStore(block.getRowCount());
  // prepare the list of consecutive chunks to read them in one scan
  List<ConsecutiveChunkList> allChunks = new ArrayList<ConsecutiveChunkList>();
  ConsecutiveChunkList currentChunks = null;
  for (ColumnChunkMetaData mc : block.getColumns()) {
    ColumnPath pathKey = mc.getPath();
    BenchmarkCounter.incrementTotalBytes(mc.getTotalSize());
    ColumnDescriptor columnDescriptor = paths.get(pathKey);
    if (columnDescriptor != null) {
      long startingPos = mc.getStartingPos();
      // first chunk or not consecutive => new list
      if (currentChunks == null || currentChunks.endPos() != startingPos) {
        currentChunks = new ConsecutiveChunkList(startingPos);
        allChunks.add(currentChunks);
      }
      currentChunks.addChunk(new ChunkDescriptor(columnDescriptor, mc, startingPos, (int) mc.getTotalSize()));
    }
  }
  // actually read all the chunks
  for (ConsecutiveChunkList consecutiveChunks : allChunks) {
    final List<Chunk> chunks = consecutiveChunks.readAll(f);
    for (Chunk chunk : chunks) {
      currentRowGroup.addColumn(chunk.descriptor.col, chunk.readAllPages());
    }
  }
  // avoid re-reading bytes: the dictionary reader is used after this call
  if (nextDictionaryReader != null) {
    nextDictionaryReader.setRowGroup(currentRowGroup);
  }
  advanceToNextBlock();
  return currentRowGroup;
}

Developer: apache, Project: parquet-mr, Lines: 49, Source: ParquetFileReader.java
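The loop above coalesces column chunks that are contiguous on disk into a single ConsecutiveChunkList, so each list can be fetched with one sequential scan instead of one seek per column. A small illustration of the rule, with made-up byte positions:

// made-up positions illustrating the coalescing rule in readNextRowGroup():
// chunk A: startingPos = 0,   totalSize = 100 -> no current list, start list #1 (endPos = 100)
// chunk B: startingPos = 100, totalSize = 150 -> startingPos matches endPos, appended to list #1 (endPos = 250)
// chunk C: startingPos = 400, totalSize = 100 -> gap after 250, starts list #2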
Example 18: test

import org.apache.parquet.column.page.PageReadStore; // import the required package/class

@Test
public void test() throws Exception {
  Path file = new Path("target/test/TestColumnChunkPageWriteStore/test.parquet");
  Path root = file.getParent();
  FileSystem fs = file.getFileSystem(conf);
  if (fs.exists(root)) {
    fs.delete(root, true);
  }
  fs.mkdirs(root);
  MessageType schema = MessageTypeParser.parseMessageType("message test { repeated binary bar; }");
  ColumnDescriptor col = schema.getColumns().get(0);
  Encoding dataEncoding = PLAIN;
  int valueCount = 10;
  int d = 1;
  int r = 2;
  int v = 3;
  BytesInput definitionLevels = BytesInput.fromInt(d);
  BytesInput repetitionLevels = BytesInput.fromInt(r);
  Statistics<?> statistics = new BinaryStatistics();
  BytesInput data = BytesInput.fromInt(v);
  int rowCount = 5;
  int nullCount = 1;
  {
    ParquetFileWriter writer = new ParquetFileWriter(conf, schema, file);
    writer.start();
    writer.startBlock(rowCount);
    {
      ColumnChunkPageWriteStore store = new ColumnChunkPageWriteStore(compressor(GZIP), schema, new HeapByteBufferAllocator());
      PageWriter pageWriter = store.getPageWriter(col);
      pageWriter.writePageV2(
          rowCount, nullCount, valueCount,
          repetitionLevels, definitionLevels,
          dataEncoding, data,
          statistics);
      store.flushToFileWriter(writer);
    }
    writer.endBlock();
    writer.end(new HashMap<String, String>());
  }
  {
    ParquetMetadata footer = ParquetFileReader.readFooter(conf, file, NO_FILTER);
    ParquetFileReader reader = new ParquetFileReader(
        conf, footer.getFileMetaData(), file, footer.getBlocks(), schema.getColumns());
    PageReadStore rowGroup = reader.readNextRowGroup();
    PageReader pageReader = rowGroup.getPageReader(col);
    DataPageV2 page = (DataPageV2) pageReader.readPage();
    assertEquals(rowCount, page.getRowCount());
    assertEquals(nullCount, page.getNullCount());
    assertEquals(valueCount, page.getValueCount());
    assertEquals(d, intValue(page.getDefinitionLevels()));
    assertEquals(r, intValue(page.getRepetitionLevels()));
    assertEquals(dataEncoding, page.getDataEncoding());
    assertEquals(v, intValue(page.getData()));
    assertEquals(statistics.toString(), page.getStatistics().toString());
    reader.close();
  }
}

Developer: apache, Project: parquet-mr, Lines: 60, Source: TestColumnChunkPageWriteStore.java
Example 19: validateContains

import org.apache.parquet.column.page.PageReadStore; // import the required package/class

private void validateContains(MessageType schema, PageReadStore pages, String[] path, int values, BytesInput bytes) throws IOException {
  PageReader pageReader = pages.getPageReader(schema.getColumnDescription(path));
  DataPage page = pageReader.readPage();
  assertEquals(values, page.getValueCount());
  assertArrayEquals(bytes.toByteArray(), ((DataPageV1) page).getBytes().toByteArray());
}

Developer: apache, Project: parquet-mr, Lines: 7, Source: TestParquetFileWriter.java
Example 20: getDictionaryPageForColumn

import org.apache.parquet.column.page.PageReadStore; // import the required package/class

private static DictionaryPage getDictionaryPageForColumn(PageReadStore pageReadStore, ColumnDescriptor columnDescriptor) {
  PageReader pageReader = pageReadStore.getPageReader(columnDescriptor);
  return pageReader.readDictionaryPage();
}

Developer: apache, Project: parquet-mr, Lines: 5, Source: FileEncodingsIT.java
Note: The org.apache.parquet.column.page.PageReadStore examples in this article were collected from GitHub, MSDocs, and similar source-code and documentation platforms; the snippets were selected from open-source projects contributed by many developers. Copyright in the source code remains with the original authors; consult each project's License before redistributing or reusing it. Do not reproduce without permission.