org.apache.parquet.hadoop.ParquetRecordReader Java Examples
The following examples show how to use
org.apache.parquet.hadoop.ParquetRecordReader.
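Before the project examples, here is a minimal standalone sketch of the API itself: construct a ParquetRecordReader with a ReadSupport, initialize it with a split and a task-attempt context, then iterate with nextKeyValue(). This is an illustrative sketch, not taken from the projects below; the whole-file split and the use of the bundled GroupReadSupport are assumptions (real jobs obtain splits from ParquetInputFormat).

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.mapreduce.TaskAttemptID;
import org.apache.hadoop.mapreduce.lib.input.FileSplit;
import org.apache.hadoop.mapreduce.task.TaskAttemptContextImpl;
import org.apache.parquet.example.data.Group;
import org.apache.parquet.hadoop.ParquetRecordReader;
import org.apache.parquet.hadoop.example.GroupReadSupport;

public class ParquetRecordReaderSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    Path file = new Path(args[0]);  // path to a Parquet file (placeholder)
    long length = file.getFileSystem(conf).getFileStatus(file).getLen();

    // One split covering the whole file; real jobs get splits from the InputFormat.
    FileSplit split = new FileSplit(file, 0, length, new String[0]);

    ParquetRecordReader<Group> reader = new ParquetRecordReader<>(new GroupReadSupport());
    reader.initialize(split, new TaskAttemptContextImpl(conf, new TaskAttemptID()));
    try {
      while (reader.nextKeyValue()) {
        Group record = reader.getCurrentValue();  // the key is Void for Parquet
        System.out.println(record);
      }
    } finally {
      reader.close();
    }
  }
}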
Example #1
Source File: PentahoApacheInputFormat.java From pentaho-hadoop-shims with Apache License 2.0
@Override
public IPentahoRecordReader createRecordReader( IPentahoInputSplit split ) throws Exception {
  return inClassloader( () -> {
    PentahoInputSplitImpl pentahoInputSplit = (PentahoInputSplitImpl) split;
    InputSplit inputSplit = pentahoInputSplit.getInputSplit();

    ReadSupport<RowMetaAndData> readSupport = new PentahoParquetReadSupport();

    // Create the native reader with the record filter configured on the job.
    ParquetRecordReader<RowMetaAndData> nativeRecordReader =
        new ParquetRecordReader<>( readSupport, ParquetInputFormat.getFilter( job.getConfiguration() ) );

    TaskAttemptContextImpl task = new TaskAttemptContextImpl( job.getConfiguration(), new TaskAttemptID() );
    nativeRecordReader.initialize( inputSplit, task );

    return new PentahoParquetRecordReader( nativeRecordReader );
  } );
}
Example #2
Source File: DeprecatedParquetInputFormat.java From parquet-mr with Apache License 2.0
public RecordReaderWrapper(
    InputSplit oldSplit, JobConf oldJobConf, Reporter reporter) throws IOException {
  splitLen = oldSplit.getLength();

  try {
    realReader = new ParquetRecordReader<V>(
        ParquetInputFormat.<V>getReadSupportInstance(oldJobConf),
        ParquetInputFormat.getFilter(oldJobConf));

    if (oldSplit instanceof ParquetInputSplitWrapper) {
      realReader.initialize(((ParquetInputSplitWrapper) oldSplit).realSplit, oldJobConf, reporter);
    } else if (oldSplit instanceof FileSplit) {
      realReader.initialize((FileSplit) oldSplit, oldJobConf, reporter);
    } else {
      throw new IllegalArgumentException(
          "Invalid split (not a FileSplit or ParquetInputSplitWrapper): " + oldSplit);
    }

    // read once to gain access to key and value objects
    if (realReader.nextKeyValue()) {
      firstRecord = true;
      valueContainer = new Container<V>();
      valueContainer.set(realReader.getCurrentValue());
    } else {
      eof = true;
    }
  } catch (InterruptedException e) {
    Thread.interrupted();
    throw new IOException(e);
  }
}
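The constructor buffers the first record so the wrapper can hand key and value instances to the old mapred API. A hedged sketch of the matching next() method follows; this shows the common pattern rather than verbatim parquet-mr code, relies on the fields declared above, and assumes Container exposes get() alongside set().

public boolean next(Void key, Container<V> value) throws IOException {
  if (eof) {
    return false;
  }
  if (firstRecord) {
    value.set(valueContainer.get());  // serve the record buffered in the constructor
    firstRecord = false;
    return true;
  }
  try {
    if (realReader.nextKeyValue()) {
      value.set(realReader.getCurrentValue());
      return true;
    }
  } catch (InterruptedException e) {
    throw new IOException(e);
  }
  eof = true;
  return false;
}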
Example #3
Source File: ParquetAsTextInputFormat.java From iow-hadoop-streaming with Apache License 2.0
public TextRecordReaderWrapper(ParquetInputFormat<SimpleGroup> newInputFormat,
    InputSplit oldSplit, JobConf oldJobConf, Reporter reporter) throws IOException {
  splitLen = oldSplit.getLength();

  try {
    ReadSupport<SimpleGroup> rs = ParquetInputFormat.getReadSupportInstance(oldJobConf);
    realReader = new ParquetRecordReader<>(rs);
    realReader.initialize(((StreamingParquetInputSplitWrapper) oldSplit).realSplit, oldJobConf, reporter);

    // Publish the input file path under both the old and new property names.
    String inputFile = ((StreamingParquetInputSplitWrapper) oldSplit).realSplit.getPath().toString();
    oldJobConf.set("map.input.file", inputFile);
    oldJobConf.set("mapreduce.map.input.file", inputFile);

    // read once to gain access to key and value objects
    if (realReader.nextKeyValue()) {
      firstRecord = true;
      valueContainer = new Container<>();
      SimpleGroup v = realReader.getCurrentValue();
      valueContainer.set(v);
      ls = groupToStrings(v);
    } else {
      eof = true;
    }
  } catch (InterruptedException e) {
    Thread.interrupted();
    throw new IOException(e);
  }
}
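The groupToStrings helper is defined elsewhere in the source file and is not shown here. A hypothetical stand-in using the public Group API might look like the following; the method name and the flattening behavior for nested fields are assumptions, not the project's actual implementation.

import java.util.ArrayList;
import java.util.List;
import org.apache.parquet.example.data.Group;

// Hypothetical equivalent of groupToStrings: render each top-level field
// value as a string; nested groups fall back to their toString() form.
static List<String> flattenGroup(Group g) {
  List<String> out = new ArrayList<>();
  for (int field = 0; field < g.getType().getFieldCount(); field++) {
    for (int index = 0; index < g.getFieldRepetitionCount(field); index++) {
      out.add(g.getValueToString(field, index));
    }
  }
  return out;
}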
Example #4
Source File: PentahoParquetRecordReader.java From pentaho-hadoop-shims with Apache License 2.0
public PentahoParquetRecordReader( ParquetRecordReader<RowMetaAndData> parquetReader ) {
  this.nativeParquetRecordReader = parquetReader;
}
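The wrapper simply holds the native reader created in Example #1. A hedged sketch of how it might delegate iteration is shown below; the method name is an assumption for illustration, not Pentaho's actual interface.

// Hypothetical delegation: pull the next row from the wrapped native reader,
// or return null once the split is exhausted.
public RowMetaAndData nextRow() throws Exception {
  return nativeParquetRecordReader.nextKeyValue()
      ? nativeParquetRecordReader.getCurrentValue()
      : null;
}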