Java Code Examples for org.apache.parquet.column.page.PageReadStore#getRowCount()
The following examples show how to use org.apache.parquet.column.page.PageReadStore#getRowCount(). Each example is taken from an open source project; the source file, project, and license are noted above it.
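A PageReadStore represents a single row group of a Parquet file, and getRowCount() returns the number of rows in that row group. It is typically obtained from ParquetFileReader#readNextRowGroup(), and the count is used to bound a record-reading loop or to track absolute row offsets, as the examples below illustrate. Here is a minimal sketch of the basic pattern, assuming a path on a Hadoop-compatible file system (the class and variable names are illustrative, not taken from any of the projects below):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.parquet.column.page.PageReadStore;
import org.apache.parquet.hadoop.ParquetFileReader;
import org.apache.parquet.hadoop.util.HadoopInputFile;

public class RowCounter {
    // Sketch: count all rows in a Parquet file by summing
    // PageReadStore#getRowCount() over its row groups.
    public static long countRows(String inputPath) throws java.io.IOException {
        try (ParquetFileReader reader = ParquetFileReader.open(
                HadoopInputFile.fromPath(new Path(inputPath), new Configuration()))) {
            long totalRows = 0;
            PageReadStore rowGroup;
            while ((rowGroup = reader.readNextRowGroup()) != null) {
                totalRows += rowGroup.getRowCount(); // rows in this row group
            }
            return totalRows;
        }
    }
}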
Example 1
Source File: ParquetReader.java From iceberg with Apache License 2.0
private void advance() {
  while (shouldSkip[nextRowGroup]) {
    nextRowGroup += 1;
    reader.skipNextRowGroup();
  }

  PageReadStore pages;
  try {
    pages = reader.readNextRowGroup();
  } catch (IOException e) {
    throw new RuntimeIOException(e);
  }

  nextRowGroupStart += pages.getRowCount();
  nextRowGroup += 1;

  model.setPageSource(pages);
}
Example 2
Source File: ParquetResolverTest.java From pxf with Apache License 2.0
@SuppressWarnings("deprecation")
private List<Group> readParquetFile(String file, long expectedSize, MessageType schema) throws IOException {
    List<Group> result = new ArrayList<>();
    String parquetFile = Objects.requireNonNull(
            getClass().getClassLoader().getResource("parquet/" + file)).getPath();
    Path path = new Path(parquetFile);

    ParquetFileReader fileReader = new ParquetFileReader(
            new Configuration(), path, ParquetMetadataConverter.NO_FILTER);
    PageReadStore rowGroup;
    while ((rowGroup = fileReader.readNextRowGroup()) != null) {
        MessageColumnIO columnIO = new ColumnIOFactory().getColumnIO(schema);
        RecordReader<Group> recordReader = columnIO.getRecordReader(rowGroup, new GroupRecordConverter(schema));
        long rowCount = rowGroup.getRowCount();
        for (long i = 0; i < rowCount; i++) {
            result.add(recordReader.read());
        }
    }
    fileReader.close();
    assertEquals(expectedSize, result.size());
    return result;
}
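The ParquetFileReader constructor used above is deprecated (hence the @SuppressWarnings annotation); the reader can instead be obtained through ParquetFileReader.open, as the Ignite examples below do. A sketch of the equivalent setup, reusing the path and configuration from the example above:

// Sketch of the non-deprecated setup. Requires
// org.apache.parquet.hadoop.ParquetFileReader and
// org.apache.parquet.hadoop.util.HadoopInputFile.
try (ParquetFileReader fileReader = ParquetFileReader.open(
        HadoopInputFile.fromPath(path, new Configuration()))) {
    PageReadStore rowGroup;
    while ((rowGroup = fileReader.readNextRowGroup()) != null) {
        // ... same record-assembly loop as in the example above ...
    }
}
// As a side benefit, try-with-resources closes the reader even if
// an assertion fails before the explicit close() call would run.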
Example 3
Source File: ParquetColumnarRowSplitReader.java From flink with Apache License 2.0
private void readNextRowGroup() throws IOException {
    PageReadStore pages = reader.readNextRowGroup();
    if (pages == null) {
        throw new IOException("expecting more rows but reached last block. Read "
                + rowsReturned + " out of " + totalRowCount);
    }
    List<ColumnDescriptor> columns = requestedSchema.getColumns();
    columnReaders = new AbstractColumnReader[columns.size()];
    for (int i = 0; i < columns.size(); ++i) {
        columnReaders[i] = createColumnReader(
                utcTimestamp,
                selectedTypes[i],
                columns.get(i),
                pages.getPageReader(columns.get(i)));
    }
    totalCountLoadedSoFar += pages.getRowCount();
}
Example 4
Source File: FileEncodingsIT.java From parquet-mr with Apache License 2.0
public static void validatePages(Path file, List<?> expectedValues) throws IOException {
    List<PageReadStore> blockReaders = readBlocksFromFile(file);
    MessageType fileSchema = readSchemaFromFile(file);
    int rowGroupID = 0;
    int rowsRead = 0;
    for (PageReadStore pageReadStore : blockReaders) {
        for (ColumnDescriptor columnsDesc : fileSchema.getColumns()) {
            List<DataPage> pageGroup = getPageGroupForColumn(pageReadStore, columnsDesc);
            DictionaryPage dictPage = reusableCopy(getDictionaryPageForColumn(pageReadStore, columnsDesc));
            List<?> expectedRowGroupValues = expectedValues.subList(rowsRead,
                    (int) (rowsRead + pageReadStore.getRowCount()));
            validateFirstToLast(rowGroupID, dictPage, pageGroup, columnsDesc, expectedRowGroupValues);
            validateLastToFirst(rowGroupID, dictPage, pageGroup, columnsDesc, expectedRowGroupValues);
        }
        rowsRead += pageReadStore.getRowCount();
        rowGroupID++;
    }
}
Example 5
Source File: VectorizedParquetReader.java From iceberg with Apache License 2.0
private void advance() {
  while (shouldSkip[nextRowGroup]) {
    nextRowGroup += 1;
    reader.skipNextRowGroup();
  }

  PageReadStore pages;
  try {
    pages = reader.readNextRowGroup();
  } catch (IOException e) {
    throw new RuntimeIOException(e);
  }

  model.setRowGroupInfo(pages, columnChunkMetadata.get(nextRowGroup));
  nextRowGroupStart += pages.getRowCount();
  nextRowGroup += 1;
}
Example 6
Source File: SparkModelParser.java From ignite with Apache License 2.0
/**
 * Load Decision Tree model.
 *
 * @param pathToMdl Path to model.
 * @param learningEnvironment Learning environment.
 */
private static Model loadDecisionTreeModel(String pathToMdl, LearningEnvironment learningEnvironment) {
    try (ParquetFileReader r = ParquetFileReader.open(
            HadoopInputFile.fromPath(new Path(pathToMdl), new Configuration()))) {
        PageReadStore pages;
        final MessageType schema = r.getFooter().getFileMetaData().getSchema();
        final MessageColumnIO colIO = new ColumnIOFactory().getColumnIO(schema);
        final Map<Integer, NodeData> nodes = new TreeMap<>();
        while (null != (pages = r.readNextRowGroup())) {
            final long rows = pages.getRowCount();
            final RecordReader recordReader = colIO.getRecordReader(pages, new GroupRecordConverter(schema));
            for (int i = 0; i < rows; i++) {
                final SimpleGroup g = (SimpleGroup) recordReader.read();
                NodeData nodeData = extractNodeDataFromParquetRow(g);
                nodes.put(nodeData.id, nodeData);
            }
        }
        return buildDecisionTreeModel(nodes);
    } catch (IOException e) {
        String msg = "Error reading parquet file: " + e.getMessage();
        learningEnvironment.logger().log(MLLogger.VerboseLevel.HIGH, msg);
        e.printStackTrace();
    }
    return null;
}
Example 7
Source File: SparkModelParser.java From ignite with Apache License 2.0
/**
 * Load SVM model.
 *
 * @param pathToMdl Path to model.
 * @param learningEnvironment Learning environment.
 */
private static Model loadLinearSVMModel(String pathToMdl, LearningEnvironment learningEnvironment) {
    Vector coefficients = null;
    double interceptor = 0;
    try (ParquetFileReader r = ParquetFileReader.open(
            HadoopInputFile.fromPath(new Path(pathToMdl), new Configuration()))) {
        PageReadStore pages;
        final MessageType schema = r.getFooter().getFileMetaData().getSchema();
        final MessageColumnIO colIO = new ColumnIOFactory().getColumnIO(schema);
        while (null != (pages = r.readNextRowGroup())) {
            final long rows = pages.getRowCount();
            final RecordReader recordReader = colIO.getRecordReader(pages, new GroupRecordConverter(schema));
            for (int i = 0; i < rows; i++) {
                final SimpleGroup g = (SimpleGroup) recordReader.read();
                interceptor = readSVMInterceptor(g);
                coefficients = readSVMCoefficients(g);
            }
        }
    } catch (IOException e) {
        String msg = "Error reading parquet file: " + e.getMessage();
        learningEnvironment.logger().log(MLLogger.VerboseLevel.HIGH, msg);
        e.printStackTrace();
    }
    return new SVMLinearClassificationModel(coefficients, interceptor);
}
Example 8
Source File: SparkModelParser.java From ignite with Apache License 2.0
/**
 * Load linear regression model.
 *
 * @param pathToMdl Path to model.
 * @param learningEnvironment Learning environment.
 */
private static Model loadLinRegModel(String pathToMdl, LearningEnvironment learningEnvironment) {
    Vector coefficients = null;
    double interceptor = 0;
    try (ParquetFileReader r = ParquetFileReader.open(
            HadoopInputFile.fromPath(new Path(pathToMdl), new Configuration()))) {
        PageReadStore pages;
        final MessageType schema = r.getFooter().getFileMetaData().getSchema();
        final MessageColumnIO colIO = new ColumnIOFactory().getColumnIO(schema);
        while (null != (pages = r.readNextRowGroup())) {
            final long rows = pages.getRowCount();
            final RecordReader recordReader = colIO.getRecordReader(pages, new GroupRecordConverter(schema));
            for (int i = 0; i < rows; i++) {
                final SimpleGroup g = (SimpleGroup) recordReader.read();
                interceptor = readLinRegInterceptor(g);
                coefficients = readLinRegCoefficients(g);
            }
        }
    } catch (IOException e) {
        String msg = "Error reading parquet file: " + e.getMessage();
        learningEnvironment.logger().log(MLLogger.VerboseLevel.HIGH, msg);
        e.printStackTrace();
    }
    return new LinearRegressionModel(coefficients, interceptor);
}
Example 9
Source File: SparkModelParser.java From ignite with Apache License 2.0
/**
 * Load logistic regression model.
 *
 * @param pathToMdl Path to model.
 * @param learningEnvironment Learning environment.
 */
private static Model loadLogRegModel(String pathToMdl, LearningEnvironment learningEnvironment) {
    Vector coefficients = null;
    double interceptor = 0;
    try (ParquetFileReader r = ParquetFileReader.open(
            HadoopInputFile.fromPath(new Path(pathToMdl), new Configuration()))) {
        PageReadStore pages;
        final MessageType schema = r.getFooter().getFileMetaData().getSchema();
        final MessageColumnIO colIO = new ColumnIOFactory().getColumnIO(schema);
        while (null != (pages = r.readNextRowGroup())) {
            final long rows = pages.getRowCount();
            final RecordReader recordReader = colIO.getRecordReader(pages, new GroupRecordConverter(schema));
            for (int i = 0; i < rows; i++) {
                final SimpleGroup g = (SimpleGroup) recordReader.read();
                interceptor = readInterceptor(g);
                coefficients = readCoefficients(g);
            }
        }
    } catch (IOException e) {
        String msg = "Error reading parquet file: " + e.getMessage();
        learningEnvironment.logger().log(MLLogger.VerboseLevel.HIGH, msg);
        e.printStackTrace();
    }
    return new LogisticRegressionModel(coefficients, interceptor);
}
Example 10
Source File: InternalParquetRecordReader.java From tajo with Apache License 2.0
private void checkRead() throws IOException {
    if (current == totalCountLoadedSoFar) {
        if (current != 0) {
            totalTimeSpentProcessingRecords += (System.currentTimeMillis() - startedAssemblingCurrentBlockAt);
            if (Log.DEBUG) {
                LOG.debug("Assembled and processed " + totalCountLoadedSoFar + " records from "
                        + columnCount + " columns in " + totalTimeSpentProcessingRecords + " ms: "
                        + ((float) totalCountLoadedSoFar / totalTimeSpentProcessingRecords) + " rec/ms, "
                        + ((float) totalCountLoadedSoFar * columnCount / totalTimeSpentProcessingRecords)
                        + " cell/ms");
                final long totalTime = totalTimeSpentProcessingRecords + totalTimeSpentReadingBytes;
                if (totalTime != 0) {
                    final long percentReading = 100 * totalTimeSpentReadingBytes / totalTime;
                    final long percentProcessing = 100 * totalTimeSpentProcessingRecords / totalTime;
                    LOG.debug("time spent so far " + percentReading + "% reading ("
                            + totalTimeSpentReadingBytes + " ms) and " + percentProcessing
                            + "% processing (" + totalTimeSpentProcessingRecords + " ms)");
                }
            }
        }
        if (Log.DEBUG) LOG.debug("at row " + current + ". reading next block");
        long t0 = System.currentTimeMillis();
        PageReadStore pages = reader.readNextRowGroup();
        if (pages == null) {
            throw new IOException("expecting more rows but reached last block. Read "
                    + current + " out of " + total);
        }
        long timeSpentReading = System.currentTimeMillis() - t0;
        totalTimeSpentReadingBytes += timeSpentReading;
        BenchmarkCounter.incrementTime(timeSpentReading);
        if (Log.INFO) LOG.info("block read in memory in " + timeSpentReading
                + " ms. row count = " + pages.getRowCount());
        if (Log.DEBUG) LOG.debug("initializing Record assembly with requested schema " + requestedSchema);
        MessageColumnIO columnIO = columnIOFactory.getColumnIO(requestedSchema, fileSchema, strictTypeChecking);
        recordReader = columnIO.getRecordReader(pages, recordConverter, filter);
        startedAssemblingCurrentBlockAt = System.currentTimeMillis();
        totalCountLoadedSoFar += pages.getRowCount();
        ++currentBlock;
    }
}
Example 11
Source File: InternalParquetRecordReader.java From parquet-mr with Apache License 2.0
private void checkRead() throws IOException {
    if (current == totalCountLoadedSoFar) {
        if (current != 0) {
            totalTimeSpentProcessingRecords += (System.currentTimeMillis() - startedAssemblingCurrentBlockAt);
            if (LOG.isInfoEnabled()) {
                LOG.info("Assembled and processed " + totalCountLoadedSoFar + " records from "
                        + columnCount + " columns in " + totalTimeSpentProcessingRecords + " ms: "
                        + ((float) totalCountLoadedSoFar / totalTimeSpentProcessingRecords) + " rec/ms, "
                        + ((float) totalCountLoadedSoFar * columnCount / totalTimeSpentProcessingRecords)
                        + " cell/ms");
                final long totalTime = totalTimeSpentProcessingRecords + totalTimeSpentReadingBytes;
                if (totalTime != 0) {
                    final long percentReading = 100 * totalTimeSpentReadingBytes / totalTime;
                    final long percentProcessing = 100 * totalTimeSpentProcessingRecords / totalTime;
                    LOG.info("time spent so far " + percentReading + "% reading ("
                            + totalTimeSpentReadingBytes + " ms) and " + percentProcessing
                            + "% processing (" + totalTimeSpentProcessingRecords + " ms)");
                }
            }
        }
        LOG.info("at row " + current + ". reading next block");
        long t0 = System.currentTimeMillis();
        PageReadStore pages = reader.readNextFilteredRowGroup();
        if (pages == null) {
            throw new IOException("expecting more rows but reached last block. Read "
                    + current + " out of " + total);
        }
        long timeSpentReading = System.currentTimeMillis() - t0;
        totalTimeSpentReadingBytes += timeSpentReading;
        BenchmarkCounter.incrementTime(timeSpentReading);
        if (LOG.isInfoEnabled()) {
            LOG.info("block read in memory in {} ms. row count = {}", timeSpentReading, pages.getRowCount());
        }
        LOG.debug("initializing Record assembly with requested schema {}", requestedSchema);
        MessageColumnIO columnIO = columnIOFactory.getColumnIO(requestedSchema, fileSchema, strictTypeChecking);
        recordReader = columnIO.getRecordReader(pages, recordConverter,
                filterRecords ? filter : FilterCompat.NOOP);
        startedAssemblingCurrentBlockAt = System.currentTimeMillis();
        totalCountLoadedSoFar += pages.getRowCount();
        ++currentBlock;
    }
}
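Unlike the Tajo variant in Example 10, this version reads through readNextFilteredRowGroup(). To my understanding of parquet-mr, when the file has column indexes and a row-filtering predicate is in effect, the returned PageReadStore covers only the surviving row ranges, so getRowCount() here can report fewer rows than the row group actually holds; totalCountLoadedSoFar then matches what the record reader will deliver. A sketch of that distinction, under the assumption that column-index filtering applies:

// Sketch (assumes column indexes are present and a row filter is set):
PageReadStore pages = reader.readNextFilteredRowGroup();
if (pages != null) {
    // May be smaller than the row group's total row count:
    // it counts only the rows that survive filtering.
    long rowsAfterFiltering = pages.getRowCount();
}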
Example 12
Source File: SparkModelParser.java From ignite with Apache License 2.0
/**
 * Load K-Means model.
 *
 * @param pathToMdl Path to model.
 * @param learningEnvironment learningEnvironment
 */
private static Model loadKMeansModel(String pathToMdl, LearningEnvironment learningEnvironment) {
    Vector[] centers = null;
    try (ParquetFileReader r = ParquetFileReader.open(
            HadoopInputFile.fromPath(new Path(pathToMdl), new Configuration()))) {
        PageReadStore pages;
        final MessageType schema = r.getFooter().getFileMetaData().getSchema();
        final MessageColumnIO colIO = new ColumnIOFactory().getColumnIO(schema);
        while (null != (pages = r.readNextRowGroup())) {
            final int rows = (int) pages.getRowCount();
            final RecordReader recordReader = colIO.getRecordReader(pages, new GroupRecordConverter(schema));
            centers = new DenseVector[rows];
            for (int i = 0; i < rows; i++) {
                final SimpleGroup g = (SimpleGroup) recordReader.read();
                // final int clusterIdx = g.getInteger(0, 0);
                Group clusterCenterCoeff = g.getGroup(1, 0).getGroup(3, 0);
                final int amountOfCoefficients = clusterCenterCoeff.getFieldRepetitionCount(0);
                centers[i] = new DenseVector(amountOfCoefficients);
                for (int j = 0; j < amountOfCoefficients; j++) {
                    double coefficient = clusterCenterCoeff.getGroup(0, j).getDouble(0, 0);
                    centers[i].set(j, coefficient);
                }
            }
        }
    } catch (IOException e) {
        String msg = "Error reading parquet file: " + e.getMessage();
        learningEnvironment.logger().log(MLLogger.VerboseLevel.HIGH, msg);
        e.printStackTrace();
    }
    return new KMeansModel(centers, new EuclideanDistance());
}