Java Code Examples for org.apache.parquet.hadoop.metadata.BlockMetaData#getCompressedSize()
The following examples show how to use org.apache.parquet.hadoop.metadata.BlockMetaData#getCompressedSize().
Each example notes its original project and source file.
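Before the project examples, here is a minimal, self-contained sketch of how a BlockMetaData is typically obtained and what getCompressedSize() reports relative to getTotalByteSize(). The class name and the command-line path argument are illustrative, and error handling is reduced to a throws clause; the Parquet and Hadoop calls themselves (ParquetFileReader.open, HadoopInputFile.fromPath, ParquetMetadata.getBlocks) are the standard parquet-mr API.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.parquet.hadoop.ParquetFileReader;
import org.apache.parquet.hadoop.metadata.BlockMetaData;
import org.apache.parquet.hadoop.metadata.ParquetMetadata;
import org.apache.parquet.hadoop.util.HadoopInputFile;

public class RowGroupSizes {
  public static void main(String[] args) throws Exception {
    Path path = new Path(args[0]); // path to a Parquet file (placeholder)
    try (ParquetFileReader reader = ParquetFileReader.open(
        HadoopInputFile.fromPath(path, new Configuration()))) {
      ParquetMetadata footer = reader.getFooter();
      for (BlockMetaData block : footer.getBlocks()) {
        // getCompressedSize() is the on-disk (compressed) size of the row group;
        // getTotalByteSize() is the uncompressed size of the same data.
        System.out.printf("rows=%d start=%d compressed=%d uncompressed=%d%n",
            block.getRowCount(), block.getStartingPos(),
            block.getCompressedSize(), block.getTotalByteSize());
      }
    }
  }
}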
Example 1
Source File: ParquetInputFormat.java From parquet-mr with Apache License 2.0
/**
 * @param rowGroupMetadata
 * @return true if the mid point of row group is in a new hdfs block, and also move the
 *         currentHDFSBlock pointer to the correct index that contains the row group;
 *         return false if the mid point of row group is in the same hdfs block
 */
private boolean checkBelongingToANewHDFSBlock(BlockMetaData rowGroupMetadata) {
  boolean isNewHdfsBlock = false;
  long rowGroupMidPoint = rowGroupMetadata.getStartingPos() + (rowGroupMetadata.getCompressedSize() / 2);

  //if mid point is not in the current HDFS block any more, return true
  while (rowGroupMidPoint > getHDFSBlockEndingPosition(currentMidPointHDFSBlockIndex)) {
    isNewHdfsBlock = true;
    currentMidPointHDFSBlockIndex++;
    if (currentMidPointHDFSBlockIndex >= hdfsBlocks.length)
      throw new ParquetDecodingException("the row group is not in hdfs blocks in the file: midpoint of row groups is "
          + rowGroupMidPoint
          + ", the end of the hdfs block is "
          + getHDFSBlockEndingPosition(currentMidPointHDFSBlockIndex - 1));
  }

  while (rowGroupMetadata.getStartingPos() > getHDFSBlockEndingPosition(currentStartHdfsBlockIndex)) {
    currentStartHdfsBlockIndex++;
    if (currentStartHdfsBlockIndex >= hdfsBlocks.length)
      throw new ParquetDecodingException("The row group does not start in this file: row group offset is "
          + rowGroupMetadata.getStartingPos()
          + " but the end of hdfs blocks of file is "
          + getHDFSBlockEndingPosition(currentStartHdfsBlockIndex));
  }
  return isNewHdfsBlock;
}
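This example assigns a row group to the HDFS block that contains its midpoint (starting position plus half the compressed size). Because row group boundaries rarely line up with HDFS block boundaries, the midpoint rule associates each row group with exactly one HDFS block, so no row group is scheduled twice.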
Example 2
Source File: UnifiedParquetReader.java From dremio-oss with Apache License 2.0
private void computeLocality(ParquetMetadata footer) throws ExecutionSetupException {
  try {
    BlockMetaData block = footer.getBlocks().get(readEntry.getRowGroupIndex());

    Iterable<FileBlockLocation> blockLocations = fs.getFileBlockLocations(
        Path.of(readEntry.getPath()), block.getStartingPos(), block.getCompressedSize());

    String localHost = InetAddress.getLocalHost().getCanonicalHostName();

    List<Range<Long>> intersectingRanges = new ArrayList<>();

    Range<Long> rowGroupRange = Range.openClosed(
        block.getStartingPos(), block.getStartingPos() + block.getCompressedSize());

    for (FileBlockLocation loc : blockLocations) {
      for (String host : loc.getHosts()) {
        if (host.equals(localHost)) {
          intersectingRanges.add(
              Range.closedOpen(loc.getOffset(), loc.getOffset() + loc.getSize())
                  .intersection(rowGroupRange));
        }
      }
    }

    long totalIntersect = 0;
    for (Range<Long> range : intersectingRanges) {
      totalIntersect += (range.upperEndpoint() - range.lowerEndpoint());
    }
    if (totalIntersect < block.getCompressedSize()) {
      context.getStats().addLongStat(Metric.NUM_REMOTE_READERS, 1);
    } else {
      context.getStats().addLongStat(Metric.NUM_REMOTE_READERS, 0);
    }
  } catch (IOException e) {
    throw new ExecutionSetupException(e);
  }
}
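Here the row group's starting position and compressed size define the exact byte range to look up in the filesystem's block locations. The code intersects that range with the ranges replicated on the local host; if the local replicas do not cover the entire compressed range, the read is counted as remote in the reader statistics.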
Example 3
Source File: SizeCommand.java From parquet-mr with Apache License 2.0
@Override
public void execute(CommandLine options) throws Exception {
  super.execute(options);

  String[] args = options.getArgs();
  String input = args[0];
  out = new PrintWriter(Main.out, true);
  inputPath = new Path(input);
  conf = new Configuration();
  inputFileStatuses = inputPath.getFileSystem(conf).globStatus(inputPath);
  long size = 0;
  for (FileStatus fs : inputFileStatuses) {
    long fileSize = 0;
    for (Footer f : ParquetFileReader.readFooters(conf, fs, false)) {
      for (BlockMetaData b : f.getParquetMetadata().getBlocks()) {
        size += (options.hasOption('u') ? b.getTotalByteSize() : b.getCompressedSize());
        fileSize += (options.hasOption('u') ? b.getTotalByteSize() : b.getCompressedSize());
      }
    }
    if (options.hasOption('d')) {
      if (options.hasOption('p')) {
        out.format("%s: %s\n", fs.getPath().getName(), getPrettySize(fileSize));
      } else {
        out.format("%s: %d bytes\n", fs.getPath().getName(), fileSize);
      }
    }
  }
  if (options.hasOption('p')) {
    out.format("Total Size: %s", getPrettySize(size));
  } else {
    out.format("Total Size: %d bytes", size);
  }
  out.println();
}
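This command sums row group sizes across all files matching the input glob. As the code shows, the 'u' option switches the totals from on-disk size (getCompressedSize()) to uncompressed size (getTotalByteSize()), 'd' adds a per-file breakdown, and 'p' prints human-readable sizes instead of raw byte counts.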
Example 4
Source File: ParquetMetadataCommand.java From parquet-mr with Apache License 2.0
private void printRowGroup(Logger console, int index, BlockMetaData rowGroup, MessageType schema) {
  long start = rowGroup.getStartingPos();
  long rowCount = rowGroup.getRowCount();
  long compressedSize = rowGroup.getCompressedSize();
  long uncompressedSize = rowGroup.getTotalByteSize();
  String filePath = rowGroup.getPath();

  console.info(String.format("\nRow group %d: count: %d %s records start: %d total: %s%s\n%s",
      index, rowCount,
      humanReadable(((float) compressedSize) / rowCount),
      start, humanReadable(compressedSize),
      filePath != null ? " path: " + filePath : "",
      new TextStringBuilder(80).appendPadding(80, '-')));

  int size = maxSize(Iterables.transform(rowGroup.getColumns(),
      new Function<ColumnChunkMetaData, String>() {
        @Override
        public String apply(@Nullable ColumnChunkMetaData input) {
          return input == null ? "" : input.getPath().toDotString();
        }
      }));

  console.info(String.format("%-" + size + "s %-9s %-9s %-9s %-10s %-7s %s",
      "", "type", "encodings", "count", "avg size", "nulls", "min / max"));
  for (ColumnChunkMetaData column : rowGroup.getColumns()) {
    printColumnChunk(console, size, column, schema);
  }
}
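Note how the average record size in the header line is derived directly from this method: the row group's compressed size divided by its row count. The uncompressed size is read via getTotalByteSize() but the printed total reflects the compressed on-disk footprint.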
Example 5
Source File: ParquetInputFormat.java From parquet-mr with Apache License 2.0
private void addRowGroup(BlockMetaData rowGroup) {
  this.rowGroups.add(rowGroup);
  this.compressedByteSize += rowGroup.getCompressedSize();
}
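This accumulator belongs to split generation: row groups are appended to a split while their combined compressed size is tracked. As a rough illustration of how such an accumulator can be used, here is a minimal, hypothetical packer; the class name, method name, and maxSplitSize threshold are illustrative and not part of parquet-mr, but the BlockMetaData calls are the real API.

import java.util.ArrayList;
import java.util.List;
import org.apache.parquet.hadoop.metadata.BlockMetaData;

public class RowGroupPacker {
  // Hypothetical helper: packs row groups into splits whose combined
  // compressed size stays at or under maxSplitSize. A single row group
  // larger than the threshold still gets its own split.
  static List<List<BlockMetaData>> pack(List<BlockMetaData> rowGroups, long maxSplitSize) {
    List<List<BlockMetaData>> splits = new ArrayList<>();
    List<BlockMetaData> current = new ArrayList<>();
    long currentSize = 0;
    for (BlockMetaData rowGroup : rowGroups) {
      if (!current.isEmpty() && currentSize + rowGroup.getCompressedSize() > maxSplitSize) {
        splits.add(current);           // close the current split
        current = new ArrayList<>();
        currentSize = 0;
      }
      current.add(rowGroup);
      currentSize += rowGroup.getCompressedSize();
    }
    if (!current.isEmpty()) {
      splits.add(current);
    }
    return splits;
  }
}

Packing by compressed size keeps splits aligned with actual on-disk I/O, which is why split planning uses getCompressedSize() rather than the uncompressed getTotalByteSize().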