org.apache.parquet.io.ColumnIOFactory Java Examples
The following examples show how to use
org.apache.parquet.io.ColumnIOFactory.
Each example notes its source file, the project it comes from, and that project's license.
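Most of the examples below share one read-side pattern: open a ParquetFileReader, take the MessageType schema from the file footer, build a MessageColumnIO with ColumnIOFactory, and pull records out of each row group through a RecordReader. The following minimal sketch shows that pattern in one place as orientation; the file path and the use of the example GroupRecordConverter are illustrative assumptions, not taken from any single project below.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.parquet.column.page.PageReadStore;
import org.apache.parquet.example.data.Group;
import org.apache.parquet.example.data.simple.convert.GroupRecordConverter;
import org.apache.parquet.hadoop.ParquetFileReader;
import org.apache.parquet.hadoop.util.HadoopInputFile;
import org.apache.parquet.io.ColumnIOFactory;
import org.apache.parquet.io.MessageColumnIO;
import org.apache.parquet.io.RecordReader;
import org.apache.parquet.schema.MessageType;

import java.io.IOException;

public class ColumnIOFactoryReadSketch {
  public static void main(String[] args) throws IOException {
    // Placeholder path: point this at an existing Parquet file.
    Path path = new Path("/tmp/example.parquet");

    try (ParquetFileReader reader =
             ParquetFileReader.open(HadoopInputFile.fromPath(path, new Configuration()))) {
      // The file's schema comes from the footer.
      MessageType schema = reader.getFooter().getFileMetaData().getSchema();

      // ColumnIOFactory turns the schema into the column I/O structure used for reading.
      MessageColumnIO columnIO = new ColumnIOFactory().getColumnIO(schema);

      PageReadStore rowGroup;
      while ((rowGroup = reader.readNextRowGroup()) != null) {
        // A RecordReader assembles the columns of the row group back into records.
        RecordReader<Group> recordReader =
            columnIO.getRecordReader(rowGroup, new GroupRecordConverter(schema));
        for (long i = 0; i < rowGroup.getRowCount(); i++) {
          Group record = recordReader.read();
          System.out.println(record);
        }
      }
    }
  }
}

The writer-side examples further down follow the mirror-image pattern, ending in getRecordWriter instead of getRecordReader.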
Example #1
Source File: TestThriftToPigCompatibility.java From parquet-mr with Apache License 2.0

/**
 * <ul> steps:
 * <li>Writes using the thrift mapping
 * <li>Reads using the pig mapping
 * <li>Use Elephant bird to convert from thrift to pig
 * <li>Check that both transformations give the same result
 * @param o the object to convert
 * @throws TException
 */
public static <T extends TBase<?,?>> void validateSameTupleAsEB(T o) throws TException {
  final ThriftSchemaConverter thriftSchemaConverter = new ThriftSchemaConverter();
  @SuppressWarnings("unchecked")
  final Class<T> class1 = (Class<T>) o.getClass();
  final MessageType schema = thriftSchemaConverter.convert(class1);

  final StructType structType = ThriftSchemaConverter.toStructType(class1);
  final ThriftToPig<T> thriftToPig = new ThriftToPig<T>(class1);
  final Schema pigSchema = thriftToPig.toSchema();
  final TupleRecordMaterializer tupleRecordConverter = new TupleRecordMaterializer(schema, pigSchema, true);
  RecordConsumer recordConsumer = new ConverterConsumer(tupleRecordConverter.getRootConverter(), schema);

  final MessageColumnIO columnIO = new ColumnIOFactory().getColumnIO(schema);
  ParquetWriteProtocol p = new ParquetWriteProtocol(new RecordConsumerLoggingWrapper(recordConsumer), columnIO, structType);
  o.write(p);

  final Tuple t = tupleRecordConverter.getCurrentRecord();
  final Tuple expected = thriftToPig.getPigTuple(o);
  assertEquals(expected.toString(), t.toString());

  final MessageType filtered = new PigSchemaConverter().filter(schema, pigSchema);
  assertEquals(schema.toString(), filtered.toString());
}
Example #2
Source File: ParquetResolverTest.java From pxf with Apache License 2.0

@SuppressWarnings("deprecation")
private List<Group> readParquetFile(String file, long expectedSize, MessageType schema) throws IOException {
  List<Group> result = new ArrayList<>();
  String parquetFile = Objects.requireNonNull(getClass().getClassLoader().getResource("parquet/" + file)).getPath();
  Path path = new Path(parquetFile);

  ParquetFileReader fileReader = new ParquetFileReader(new Configuration(), path, ParquetMetadataConverter.NO_FILTER);
  PageReadStore rowGroup;
  while ((rowGroup = fileReader.readNextRowGroup()) != null) {
    MessageColumnIO columnIO = new ColumnIOFactory().getColumnIO(schema);
    RecordReader<Group> recordReader = columnIO.getRecordReader(rowGroup, new GroupRecordConverter(schema));
    long rowCount = rowGroup.getRowCount();
    for (long i = 0; i < rowCount; i++) {
      result.add(recordReader.read());
    }
  }
  fileReader.close();
  assertEquals(expectedSize, result.size());
  return result;
}
Example #3
Source File: InternalParquetRecordReader.java From parquet-mr with Apache License 2.0

public void initialize(ParquetFileReader reader, Configuration configuration) throws IOException {
  // initialize a ReadContext for this file
  this.reader = reader;
  FileMetaData parquetFileMetadata = reader.getFooter().getFileMetaData();
  this.fileSchema = parquetFileMetadata.getSchema();
  Map<String, String> fileMetadata = parquetFileMetadata.getKeyValueMetaData();
  ReadSupport.ReadContext readContext = readSupport.init(new InitContext(
      configuration, toSetMultiMap(fileMetadata), fileSchema));
  this.columnIOFactory = new ColumnIOFactory(parquetFileMetadata.getCreatedBy());
  this.requestedSchema = readContext.getRequestedSchema();
  this.columnCount = requestedSchema.getPaths().size();
  // Setting the projection schema before running any filtering (e.g. getting filtered record count)
  // because projection impacts filtering
  reader.setRequestedSchema(requestedSchema);
  this.recordConverter = readSupport.prepareForRead(
      configuration, fileMetadata, fileSchema, readContext);
  this.strictTypeChecking = configuration.getBoolean(STRICT_TYPE_CHECKING, true);
  this.total = reader.getFilteredRecordCount();
  this.unmaterializableRecordCounter = new UnmaterializableRecordCounter(configuration, total);
  this.filterRecords = configuration.getBoolean(RECORD_FILTERING_ENABLED, true);
  LOG.info("RecordReader initialized will read a total of {} records.", total);
}
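The factory built here with the file's createdBy string is only stored; nothing is read yet. When a row group is later consumed, the pieces initialized above are typically combined along the lines of the sketch below. This is a simplified illustration, not the parquet-mr internals verbatim; the parameter names mirror the fields set in initialize(), and only calls that appear elsewhere on this page are used.

import org.apache.parquet.column.page.PageReadStore;
import org.apache.parquet.hadoop.ParquetFileReader;
import org.apache.parquet.io.ColumnIOFactory;
import org.apache.parquet.io.MessageColumnIO;
import org.apache.parquet.io.RecordReader;
import org.apache.parquet.io.api.RecordMaterializer;
import org.apache.parquet.schema.MessageType;

import java.io.IOException;

final class RowGroupReadSketch {
  // Hedged sketch of the per-row-group step a reader like the one above performs later:
  // the stored factory resolves requested vs. file schema, then hands out a RecordReader.
  static <T> void readOneRowGroup(ParquetFileReader reader,
                                  ColumnIOFactory columnIOFactory,
                                  MessageType requestedSchema,
                                  MessageType fileSchema,
                                  RecordMaterializer<T> recordConverter,
                                  boolean strictTypeChecking) throws IOException {
    PageReadStore pages = reader.readNextRowGroup();
    if (pages == null) {
      return; // no more row groups
    }
    MessageColumnIO columnIO =
        columnIOFactory.getColumnIO(requestedSchema, fileSchema, strictTypeChecking);
    RecordReader<T> recordReader = columnIO.getRecordReader(pages, recordConverter);
    for (long i = 0; i < pages.getRowCount(); i++) {
      T record = recordReader.read(); // materialized by the converter prepared in initialize()
    }
  }
}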
Example #4
Source File: InternalParquetRecordReader.java From tajo with Apache License 2.0

public void initialize(FileMetaData parquetFileMetadata, Path file, List<BlockMetaData> blocks,
                       Configuration configuration) throws IOException {
  // initialize a ReadContext for this file
  Map<String, String> fileMetadata = parquetFileMetadata.getKeyValueMetaData();
  ReadSupport.ReadContext readContext = readSupport.init(new InitContext(
      configuration, toSetMultiMap(fileMetadata), fileSchema));
  this.columnIOFactory = new ColumnIOFactory(parquetFileMetadata.getCreatedBy());
  this.requestedSchema = readContext.getRequestedSchema();
  this.fileSchema = parquetFileMetadata.getSchema();
  this.file = file;
  this.columnCount = requestedSchema.getPaths().size();
  this.recordConverter = readSupport.prepareForRead(
      configuration, fileMetadata, fileSchema, readContext);
  this.strictTypeChecking = configuration.getBoolean(STRICT_TYPE_CHECKING, true);
  List<ColumnDescriptor> columns = requestedSchema.getColumns();
  reader = new ParquetFileReader(configuration, parquetFileMetadata, file, blocks, columns);
  for (BlockMetaData block : blocks) {
    total += block.getRowCount();
  }
  this.unmaterializableRecordCounter = new UnmaterializableRecordCounter(configuration, total);
  LOG.info("RecordReader initialized will read a total of " + total + " records.");
}
Example #5
Source File: SparkModelParser.java From ignite with Apache License 2.0

/**
 * Load SVM model.
 *
 * @param pathToMdl Path to model.
 * @param learningEnvironment Learning environment.
 */
private static Model loadLinearSVMModel(String pathToMdl, LearningEnvironment learningEnvironment) {
  Vector coefficients = null;
  double interceptor = 0;

  try (ParquetFileReader r = ParquetFileReader.open(HadoopInputFile.fromPath(new Path(pathToMdl), new Configuration()))) {
    PageReadStore pages;
    final MessageType schema = r.getFooter().getFileMetaData().getSchema();
    final MessageColumnIO colIO = new ColumnIOFactory().getColumnIO(schema);

    while (null != (pages = r.readNextRowGroup())) {
      final long rows = pages.getRowCount();
      final RecordReader recordReader = colIO.getRecordReader(pages, new GroupRecordConverter(schema));
      for (int i = 0; i < rows; i++) {
        final SimpleGroup g = (SimpleGroup)recordReader.read();
        interceptor = readSVMInterceptor(g);
        coefficients = readSVMCoefficients(g);
      }
    }
  } catch (IOException e) {
    String msg = "Error reading parquet file: " + e.getMessage();
    learningEnvironment.logger().log(MLLogger.VerboseLevel.HIGH, msg);
    e.printStackTrace();
  }

  return new SVMLinearClassificationModel(coefficients, interceptor);
}
Example #6
Source File: TestParquetReadProtocol.java From parquet-mr with Apache License 2.0

private <T extends TBase<?,?>> void validate(T expected) throws TException {
  @SuppressWarnings("unchecked")
  final Class<T> thriftClass = (Class<T>)expected.getClass();
  final MemPageStore memPageStore = new MemPageStore(1);
  final ThriftSchemaConverter schemaConverter = new ThriftSchemaConverter();
  final MessageType schema = schemaConverter.convert(thriftClass);
  LOG.info("{}", schema);

  final MessageColumnIO columnIO = new ColumnIOFactory(true).getColumnIO(schema);
  final ColumnWriteStoreV1 columns = new ColumnWriteStoreV1(memPageStore,
      ParquetProperties.builder()
          .withPageSize(10000)
          .withDictionaryEncoding(false)
          .build());
  final RecordConsumer recordWriter = columnIO.getRecordWriter(columns);
  final StructType thriftType = schemaConverter.toStructType(thriftClass);
  ParquetWriteProtocol parquetWriteProtocol = new ParquetWriteProtocol(recordWriter, columnIO, thriftType);

  expected.write(parquetWriteProtocol);
  recordWriter.flush();
  columns.flush();

  ThriftRecordConverter<T> converter = new TBaseRecordConverter<T>(thriftClass, schema, thriftType);
  final RecordReader<T> recordReader = columnIO.getRecordReader(memPageStore, converter);

  final T result = recordReader.read();

  assertEquals(expected, result);
}
Example #7
Source File: InternalParquetRecordWriter.java From parquet-mr with Apache License 2.0

private void initStore() {
  ColumnChunkPageWriteStore columnChunkPageWriteStore = new ColumnChunkPageWriteStore(compressor,
      schema, props.getAllocator(), props.getColumnIndexTruncateLength(),
      props.getPageWriteChecksumEnabled());
  pageStore = columnChunkPageWriteStore;
  bloomFilterWriteStore = columnChunkPageWriteStore;

  columnStore = props.newColumnWriteStore(schema, pageStore, bloomFilterWriteStore);
  MessageColumnIO columnIO = new ColumnIOFactory(validating).getColumnIO(schema);
  this.recordConsumer = columnIO.getRecordWriter(columnStore);
  writeSupport.prepareForWrite(recordConsumer);
}
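On the write side, getRecordWriter hands back a RecordConsumer that records are pushed into field by field. The sketch below shows that hand-off end to end using only calls that appear elsewhere on this page; the schema and values are made up for illustration, and MemPageStore is the in-memory test utility from parquet-column that Example #6 also uses, so this is a sketch rather than a production setup.

import org.apache.parquet.column.ParquetProperties;
import org.apache.parquet.column.impl.ColumnWriteStoreV1;
import org.apache.parquet.column.page.mem.MemPageStore;
import org.apache.parquet.io.ColumnIOFactory;
import org.apache.parquet.io.MessageColumnIO;
import org.apache.parquet.io.api.Binary;
import org.apache.parquet.io.api.RecordConsumer;
import org.apache.parquet.schema.MessageType;
import org.apache.parquet.schema.MessageTypeParser;

public class RecordWriterSketch {
  public static void main(String[] args) {
    // Illustrative schema; any MessageType works here.
    MessageType schema = MessageTypeParser.parseMessageType(
        "message example { required int32 id; required binary name; }");

    MemPageStore pageStore = new MemPageStore(1);
    ColumnWriteStoreV1 columnStore = new ColumnWriteStoreV1(pageStore,
        ParquetProperties.builder()
            .withPageSize(10000)
            .withDictionaryEncoding(false)
            .build());

    // validating=true asks the ColumnIO layer to check written values against the schema.
    MessageColumnIO columnIO = new ColumnIOFactory(true).getColumnIO(schema);
    RecordConsumer consumer = columnIO.getRecordWriter(columnStore);

    // Write one record field by field through the RecordConsumer API.
    consumer.startMessage();
    consumer.startField("id", 0);
    consumer.addInteger(42);
    consumer.endField("id", 0);
    consumer.startField("name", 1);
    consumer.addBinary(Binary.fromString("example"));
    consumer.endField("name", 1);
    consumer.endMessage();

    // As in Example #6: flush the record writer, then the column store.
    consumer.flush();
    columnStore.flush();
  }
}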
Example #8
Source File: InternalParquetRecordReader.java From parquet-mr with Apache License 2.0

public void initialize(ParquetFileReader reader, ParquetReadOptions options) {
  // copy custom configuration to the Configuration passed to the ReadSupport
  Configuration conf = new Configuration();
  if (options instanceof HadoopReadOptions) {
    conf = ((HadoopReadOptions) options).getConf();
  }
  for (String property : options.getPropertyNames()) {
    conf.set(property, options.getProperty(property));
  }

  // initialize a ReadContext for this file
  this.reader = reader;
  FileMetaData parquetFileMetadata = reader.getFooter().getFileMetaData();
  this.fileSchema = parquetFileMetadata.getSchema();
  Map<String, String> fileMetadata = parquetFileMetadata.getKeyValueMetaData();
  ReadSupport.ReadContext readContext = readSupport.init(new InitContext(conf, toSetMultiMap(fileMetadata), fileSchema));
  this.columnIOFactory = new ColumnIOFactory(parquetFileMetadata.getCreatedBy());
  this.requestedSchema = readContext.getRequestedSchema();
  this.columnCount = requestedSchema.getPaths().size();
  // Setting the projection schema before running any filtering (e.g. getting filtered record count)
  // because projection impacts filtering
  reader.setRequestedSchema(requestedSchema);
  this.recordConverter = readSupport.prepareForRead(conf, fileMetadata, fileSchema, readContext);
  this.strictTypeChecking = options.isEnabled(STRICT_TYPE_CHECKING, true);
  this.total = reader.getFilteredRecordCount();
  this.unmaterializableRecordCounter = new UnmaterializableRecordCounter(options, total);
  this.filterRecords = options.useRecordFilter();
  LOG.info("RecordReader initialized will read a total of {} records.", total);
}
Example #9
Source File: ParquetRecordReader.java From flink with Apache License 2.0

public void initialize(ParquetFileReader reader, Configuration configuration) {
  this.reader = reader;
  FileMetaData parquetFileMetadata = reader.getFooter().getFileMetaData();
  // real schema of parquet file
  this.fileSchema = parquetFileMetadata.getSchema();
  Map<String, String> fileMetadata = parquetFileMetadata.getKeyValueMetaData();
  ReadSupport.ReadContext readContext = readSupport.init(new InitContext(
      configuration, toSetMultiMap(fileMetadata), readSchema));

  this.columnIOFactory = new ColumnIOFactory(parquetFileMetadata.getCreatedBy());
  this.recordMaterializer = readSupport.prepareForRead(
      configuration, fileMetadata, readSchema, readContext);
  this.numTotalRecords = reader.getRecordCount();
}
Example #10
Source File: SparkModelParser.java From ignite with Apache License 2.0

/**
 * Load logistic regression model.
 *
 * @param pathToMdl Path to model.
 * @param learningEnvironment Learning environment.
 */
private static Model loadLogRegModel(String pathToMdl, LearningEnvironment learningEnvironment) {
  Vector coefficients = null;
  double interceptor = 0;

  try (ParquetFileReader r = ParquetFileReader.open(HadoopInputFile.fromPath(new Path(pathToMdl), new Configuration()))) {
    PageReadStore pages;
    final MessageType schema = r.getFooter().getFileMetaData().getSchema();
    final MessageColumnIO colIO = new ColumnIOFactory().getColumnIO(schema);

    while (null != (pages = r.readNextRowGroup())) {
      final long rows = pages.getRowCount();
      final RecordReader recordReader = colIO.getRecordReader(pages, new GroupRecordConverter(schema));
      for (int i = 0; i < rows; i++) {
        final SimpleGroup g = (SimpleGroup)recordReader.read();
        interceptor = readInterceptor(g);
        coefficients = readCoefficients(g);
      }
    }
  } catch (IOException e) {
    String msg = "Error reading parquet file: " + e.getMessage();
    learningEnvironment.logger().log(MLLogger.VerboseLevel.HIGH, msg);
    e.printStackTrace();
  }

  return new LogisticRegressionModel(coefficients, interceptor);
}
Example #11
Source File: SparkModelParser.java From ignite with Apache License 2.0

/**
 * Load linear regression model.
 *
 * @param pathToMdl Path to model.
 * @param learningEnvironment Learning environment.
 */
private static Model loadLinRegModel(String pathToMdl, LearningEnvironment learningEnvironment) {
  Vector coefficients = null;
  double interceptor = 0;

  try (ParquetFileReader r = ParquetFileReader.open(HadoopInputFile.fromPath(new Path(pathToMdl), new Configuration()))) {
    PageReadStore pages;
    final MessageType schema = r.getFooter().getFileMetaData().getSchema();
    final MessageColumnIO colIO = new ColumnIOFactory().getColumnIO(schema);

    while (null != (pages = r.readNextRowGroup())) {
      final long rows = pages.getRowCount();
      final RecordReader recordReader = colIO.getRecordReader(pages, new GroupRecordConverter(schema));
      for (int i = 0; i < rows; i++) {
        final SimpleGroup g = (SimpleGroup)recordReader.read();
        interceptor = readLinRegInterceptor(g);
        coefficients = readLinRegCoefficients(g);
      }
    }
  } catch (IOException e) {
    String msg = "Error reading parquet file: " + e.getMessage();
    learningEnvironment.logger().log(MLLogger.VerboseLevel.HIGH, msg);
    e.printStackTrace();
  }

  return new LinearRegressionModel(coefficients, interceptor);
}
Example #12
Source File: SparkModelParser.java From ignite with Apache License 2.0

/**
 * Load Decision Tree model.
 *
 * @param pathToMdl Path to model.
 * @param learningEnvironment Learning environment.
 */
private static Model loadDecisionTreeModel(String pathToMdl, LearningEnvironment learningEnvironment) {
  try (ParquetFileReader r = ParquetFileReader.open(HadoopInputFile.fromPath(new Path(pathToMdl), new Configuration()))) {
    PageReadStore pages;

    final MessageType schema = r.getFooter().getFileMetaData().getSchema();
    final MessageColumnIO colIO = new ColumnIOFactory().getColumnIO(schema);
    final Map<Integer, NodeData> nodes = new TreeMap<>();

    while (null != (pages = r.readNextRowGroup())) {
      final long rows = pages.getRowCount();
      final RecordReader recordReader = colIO.getRecordReader(pages, new GroupRecordConverter(schema));
      for (int i = 0; i < rows; i++) {
        final SimpleGroup g = (SimpleGroup)recordReader.read();
        NodeData nodeData = extractNodeDataFromParquetRow(g);
        nodes.put(nodeData.id, nodeData);
      }
    }

    return buildDecisionTreeModel(nodes);
  } catch (IOException e) {
    String msg = "Error reading parquet file: " + e.getMessage();
    learningEnvironment.logger().log(MLLogger.VerboseLevel.HIGH, msg);
    e.printStackTrace();
  }

  return null;
}
Example #13
Source File: TestParquetWriteProtocol.java From parquet-mr with Apache License 2.0

private void validateThrift(String[] expectations, TBase<?, ?> a) throws TException {
  final ThriftSchemaConverter thriftSchemaConverter = new ThriftSchemaConverter();
  // System.out.println(a);
  final Class<TBase<?,?>> class1 = (Class<TBase<?,?>>)a.getClass();
  final MessageType schema = thriftSchemaConverter.convert(class1);
  LOG.info("{}", schema);

  final StructType structType = thriftSchemaConverter.toStructType(class1);
  ExpectationValidatingRecordConsumer recordConsumer = new ExpectationValidatingRecordConsumer(
      new ArrayDeque<String>(Arrays.asList(expectations)));
  final MessageColumnIO columnIO = new ColumnIOFactory().getColumnIO(schema);
  ParquetWriteProtocol p = new ParquetWriteProtocol(new RecordConsumerLoggingWrapper(recordConsumer), columnIO, structType);
  a.write(p);
}
Example #14
Source File: ParquetRecordWriter.java From dremio-oss with Apache License 2.0

private void newSchema() throws IOException {
  // Reset it to half of current number and bound it within the limits
  recordCountForNextMemCheck = min(max(MINIMUM_RECORD_COUNT_FOR_CHECK, recordCountForNextMemCheck / 2), MAXIMUM_RECORD_COUNT_FOR_CHECK);

  String json = new Schema(batchSchema).toJson();
  extraMetaData.put(DREMIO_ARROW_SCHEMA_2_1, json);
  schema = getParquetMessageType(batchSchema, "root");

  int dictionarySize = (int)context.getOptions().getOption(ExecConstants.PARQUET_DICT_PAGE_SIZE_VALIDATOR);
  final ParquetProperties parquetProperties = ParquetProperties.builder()
      .withDictionaryPageSize(dictionarySize)
      .withWriterVersion(writerVersion)
      .withValuesWriterFactory(new DefaultV1ValuesWriterFactory())
      .withDictionaryEncoding(enableDictionary)
      .withAllocator(new ParquetDirectByteBufferAllocator(columnEncoderAllocator))
      .withPageSize(pageSize)
      .withAddPageHeadersToMetadata(true)
      .withEnableDictionarForBinaryType(enableDictionaryForBinary)
      .withPageRowCountLimit(Integer.MAX_VALUE) // Bug 16118
      .build();

  pageStore = ColumnChunkPageWriteStoreExposer.newColumnChunkPageWriteStore(
      toDeprecatedBytesCompressor(codecFactory.getCompressor(codec)), schema, parquetProperties);
  store = new ColumnWriteStoreV1(pageStore, parquetProperties);
  MessageColumnIO columnIO = new ColumnIOFactory(false).getColumnIO(this.schema);
  consumer = columnIO.getRecordWriter(store);
  setUp(schema, consumer);
}
Example #15
Source File: ParquetFileLineFetcher.java From hugegraph-loader with Apache License 2.0

@Override
public void openReader(Readable readable) {
  Path path = new Path(this.source().path());
  try {
    HadoopInputFile file = HadoopInputFile.fromPath(path, this.conf);
    this.reader = ParquetFileReader.open(file);
    this.schema = this.reader.getFooter().getFileMetaData().getSchema();
    this.columnIO = new ColumnIOFactory().getColumnIO(this.schema);
  } catch (IOException e) {
    throw new LoadException("Failed to open parquet reader for '%s'", e, readable);
  }
  this.resetOffset();
}
Example #16
Source File: ParquetRecordWriter.java From Bats with Apache License 2.0

private void newSchema() throws IOException {
  List<Type> types = Lists.newArrayList();
  for (MaterializedField field : batchSchema) {
    if (field.getName().equalsIgnoreCase(WriterPrel.PARTITION_COMPARATOR_FIELD)) {
      continue;
    }
    types.add(getType(field));
  }
  schema = new MessageType("root", types);

  // We don't want this number to be too small, ideally we divide the block equally across the columns.
  // It is unlikely all columns are going to be the same size.
  // Its value is likely below Integer.MAX_VALUE (2GB), although rowGroupSize is a long type.
  // Therefore this size is cast to int, since allocating byte array in under layer needs to
  // limit the array size in an int scope.
  int initialBlockBufferSize = this.schema.getColumns().size() > 0
      ? max(MINIMUM_BUFFER_SIZE, blockSize / this.schema.getColumns().size() / 5)
      : MINIMUM_BUFFER_SIZE;
  // We don't want this number to be too small either. Ideally, slightly bigger than the page size,
  // but not bigger than the block buffer
  int initialPageBufferSize = max(MINIMUM_BUFFER_SIZE, min(pageSize + pageSize / 10, initialBlockBufferSize));
  // TODO: Use initialSlabSize from ParquetProperties once drill will be updated to the latest version of Parquet library
  int initialSlabSize = CapacityByteArrayOutputStream.initialSlabSizeHeuristic(64, pageSize, 10);
  // TODO: Replace ParquetColumnChunkPageWriteStore with ColumnChunkPageWriteStore from parquet library
  // once PARQUET-1006 will be resolved
  pageStore = new ParquetColumnChunkPageWriteStore(codecFactory.getCompressor(codec), schema, initialSlabSize,
      pageSize, new ParquetDirectByteBufferAllocator(oContext));
  ParquetProperties parquetProperties = ParquetProperties.builder()
      .withPageSize(pageSize)
      .withDictionaryEncoding(enableDictionary)
      .withDictionaryPageSize(initialPageBufferSize)
      .withWriterVersion(writerVersion)
      .withAllocator(new ParquetDirectByteBufferAllocator(oContext))
      .withValuesWriterFactory(new DefaultV1ValuesWriterFactory())
      .build();
  store = new ColumnWriteStoreV1(pageStore, parquetProperties);
  MessageColumnIO columnIO = new ColumnIOFactory(false).getColumnIO(this.schema);
  consumer = columnIO.getRecordWriter(store);
  setUp(schema, consumer);
}
Example #17
Source File: TupleConsumerPerfTest.java From parquet-mr with Apache License 2.0

private static MessageColumnIO newColumnFactory(String pigSchemaString) throws ParserException {
  MessageType schema = new PigSchemaConverter().convert(Utils.getSchemaFromString(pigSchemaString));
  return new ColumnIOFactory().getColumnIO(schema);
}
Example #18
Source File: ThriftBytesWriteSupport.java From parquet-mr with Apache License 2.0

@Override
public void prepareForWrite(RecordConsumer recordConsumer) {
  final MessageColumnIO columnIO = new ColumnIOFactory().getColumnIO(schema);
  this.parquetWriteProtocol = new ParquetWriteProtocol(recordConsumer, columnIO, thriftStruct);
  thriftWriteSupport.prepareForWrite(recordConsumer);
}
Example #19
Source File: AbstractThriftWriteSupport.java From parquet-mr with Apache License 2.0

@Override
public void prepareForWrite(RecordConsumer recordConsumer) {
  final MessageColumnIO columnIO = new ColumnIOFactory().getColumnIO(schema);
  this.parquetWriteProtocol = new ParquetWriteProtocol(recordConsumer, columnIO, thriftStruct);
}
Example #20
Source File: SparkModelParser.java From ignite with Apache License 2.0

/**
 * Load K-Means model.
 *
 * @param pathToMdl Path to model.
 * @param learningEnvironment learningEnvironment
 */
private static Model loadKMeansModel(String pathToMdl, LearningEnvironment learningEnvironment) {
  Vector[] centers = null;

  try (ParquetFileReader r = ParquetFileReader.open(HadoopInputFile.fromPath(new Path(pathToMdl), new Configuration()))) {
    PageReadStore pages;
    final MessageType schema = r.getFooter().getFileMetaData().getSchema();
    final MessageColumnIO colIO = new ColumnIOFactory().getColumnIO(schema);

    while (null != (pages = r.readNextRowGroup())) {
      final int rows = (int)pages.getRowCount();
      final RecordReader recordReader = colIO.getRecordReader(pages, new GroupRecordConverter(schema));
      centers = new DenseVector[rows];

      for (int i = 0; i < rows; i++) {
        final SimpleGroup g = (SimpleGroup)recordReader.read();
        // final int clusterIdx = g.getInteger(0, 0);

        Group clusterCenterCoeff = g.getGroup(1, 0).getGroup(3, 0);

        final int amountOfCoefficients = clusterCenterCoeff.getFieldRepetitionCount(0);

        centers[i] = new DenseVector(amountOfCoefficients);

        for (int j = 0; j < amountOfCoefficients; j++) {
          double coefficient = clusterCenterCoeff.getGroup(0, j).getDouble(0, 0);
          centers[i].set(j, coefficient);
        }
      }
    }
  } catch (IOException e) {
    String msg = "Error reading parquet file: " + e.getMessage();
    learningEnvironment.logger().log(MLLogger.VerboseLevel.HIGH, msg);
    e.printStackTrace();
  }

  return new KMeansModel(centers, new EuclideanDistance());
}
Example #21
Source File: ParquetTypeUtils.java From presto with Apache License 2.0

public static MessageColumnIO getColumnIO(MessageType fileSchema, MessageType requestedSchema) {
  return (new ColumnIOFactory()).getColumnIO(requestedSchema, fileSchema, true);
}
Example #22
Source File: ParquetTypeUtils.java From presto with Apache License 2.0

public static List<PrimitiveColumnIO> getColumns(MessageType fileSchema, MessageType requestedSchema) {
  return (new ColumnIOFactory()).getColumnIO(requestedSchema, fileSchema, true).getLeaves();
}
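The three-argument getColumnIO used in the last two examples resolves a requested (projected) schema against the file schema, with the boolean switching strict type checking on. The short, self-contained sketch below illustrates that resolution and the getLeaves() call with made-up schemas; the field names are assumptions chosen only for the illustration and do not come from the Presto sources.

import org.apache.parquet.io.ColumnIOFactory;
import org.apache.parquet.io.MessageColumnIO;
import org.apache.parquet.io.PrimitiveColumnIO;
import org.apache.parquet.schema.MessageType;
import org.apache.parquet.schema.MessageTypeParser;

import java.util.Arrays;

public class ProjectionSketch {
  public static void main(String[] args) {
    // Schema as written in a (hypothetical) file.
    MessageType fileSchema = MessageTypeParser.parseMessageType(
        "message example { required int32 id; optional binary name; optional double score; }");

    // Projection requested by the reader: a subset of the file's columns.
    MessageType requestedSchema = MessageTypeParser.parseMessageType(
        "message example { required int32 id; optional double score; }");

    // Resolve the projection against the file schema; 'true' enables strict type checking.
    MessageColumnIO columnIO = new ColumnIOFactory().getColumnIO(requestedSchema, fileSchema, true);

    // The leaves are the primitive columns that will actually be read, as in getColumns() above.
    for (PrimitiveColumnIO leaf : columnIO.getLeaves()) {
      System.out.println(Arrays.toString(leaf.getColumnDescriptor().getPath()));
    }
  }
}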