Java Code Examples for io.airlift.slice.Slice#getInput()
The following examples show how to use io.airlift.slice.Slice#getInput().
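All of the examples below share one pattern: getInput() wraps a Slice in a SliceInput, a positioned reader with typed read methods, which deserialization code then consumes front to back. Here is a minimal sketch of that round trip (the one-byte tag and int payload are invented for illustration; DynamicSliceOutput is the writer counterpart in io.airlift.slice):

import io.airlift.slice.DynamicSliceOutput;
import io.airlift.slice.Slice;
import io.airlift.slice.SliceInput;

public class SliceInputSketch
{
    public static void main(String[] args)
    {
        // write a tiny record: a one-byte format tag followed by an int payload
        // (this layout is hypothetical, chosen only to mirror the examples below)
        DynamicSliceOutput output = new DynamicSliceOutput(16);
        output.writeByte(1);
        output.writeInt(42);
        Slice slice = output.slice();

        // read it back sequentially through getInput()
        SliceInput input = slice.getInput();
        byte formatTag = input.readByte();
        int payload = input.readInt();
        System.out.printf("tag=%d payload=%d%n", formatTag, payload);
    }
}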
Example 1
Source File: CompressedOrcChunkLoader.java From presto with Apache License 2.0
private void ensureCompressedBytesAvailable(int size)
        throws IOException
{
    // is this within the current buffer?
    if (size <= compressedBufferStream.remaining()) {
        return;
    }

    // is this a read larger than the buffer
    if (size > dataReader.getMaxBufferSize()) {
        throw new OrcCorruptionException(dataReader.getOrcDataSourceId(), "Requested read size (%s bytes) is greater than max buffer size (%s bytes)", size, dataReader.getMaxBufferSize());
    }

    // is this a read past the end of the stream
    if (compressedBufferStart + compressedBufferStream.position() + size > dataReader.getSize()) {
        throw new OrcCorruptionException(dataReader.getOrcDataSourceId(), "Read past end of stream");
    }

    compressedBufferStart = compressedBufferStart + toIntExact(compressedBufferStream.position());
    Slice compressedBuffer = dataReader.seekBuffer(compressedBufferStart);
    dataReaderMemoryUsage.setBytes(dataReader.getRetainedSize());
    if (compressedBuffer.length() < size) {
        throw new OrcCorruptionException(dataReader.getOrcDataSourceId(), "Requested read of %s bytes but only %s bytes were available", size, compressedBuffer.length());
    }
    compressedBufferStream = compressedBuffer.getInput();
}
Example 2
Source File: NumericHistogram.java From presto with Apache License 2.0
public NumericHistogram(Slice serialized, int buffer)
{
    requireNonNull(serialized, "serialized is null");
    checkArgument(buffer >= 1, "buffer must be >= 1");

    SliceInput input = serialized.getInput();

    checkArgument(input.readByte() == FORMAT_TAG, "Unsupported format tag");

    maxBuckets = input.readInt();
    nextIndex = input.readInt();
    values = new double[maxBuckets + buffer];
    weights = new double[maxBuckets + buffer];
    input.readBytes(Slices.wrappedDoubleArray(values), nextIndex * SizeOf.SIZE_OF_DOUBLE);
    input.readBytes(Slices.wrappedDoubleArray(weights), nextIndex * SizeOf.SIZE_OF_DOUBLE);
}
Example 3
Source File: SetDigest.java From presto with Apache License 2.0
public static SetDigest newInstance(Slice serialized)
{
    requireNonNull(serialized, "serialized is null");
    SliceInput input = serialized.getInput();
    checkArgument(input.readByte() == UNCOMPRESSED_FORMAT, "Unexpected version");
    int hllLength = input.readInt();
    Slice serializedHll = Slices.allocate(hllLength);
    input.readBytes(serializedHll, hllLength);
    HyperLogLog hll = HyperLogLog.newInstance(serializedHll);

    Long2ShortRBTreeMap minhash = new Long2ShortRBTreeMap();
    int maxHashes = input.readInt();
    int minhashLength = input.readInt();
    // The values are stored after the keys
    SliceInput valuesInput = serialized.getInput();
    valuesInput.setPosition(input.position() + minhashLength * SIZE_OF_LONG);

    for (int i = 0; i < minhashLength; i++) {
        minhash.put(input.readLong(), valuesInput.readShort());
    }

    return new SetDigest(maxHashes, hll, minhash);
}
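A detail worth noting here: each call to getInput() returns a fresh SliceInput with its own position, which is what lets this method open a second cursor (valuesInput) over the same serialized slice, seek it past the key region, and read keys and values in lockstep in a single loop.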
Example 4
Source File: StringClassifierAdapter.java From presto with Apache License 2.0
public static StringClassifierAdapter deserialize(byte[] data)
{
    Slice slice = Slices.wrappedBuffer(data);
    BasicSliceInput input = slice.getInput();
    int classifierLength = input.readInt();

    Model classifier = ModelUtils.deserialize(input.readSlice(classifierLength));
    int numEnumerations = input.readInt();

    ImmutableMap.Builder<Integer, String> builder = ImmutableMap.builder();

    for (int i = 0; i < numEnumerations; i++) {
        int key = input.readInt();
        int valueLength = input.readInt();
        String value = input.readSlice(valueLength).toStringUtf8();
        builder.put(key, value);
    }

    return new StringClassifierAdapter((Classifier<Integer>) classifier, builder.build());
}
Example 5
Source File: ParquetCompressionUtils.java From presto with Apache License 2.0
private static Slice decompressGzip(Slice input, int uncompressedSize)
        throws IOException
{
    if (uncompressedSize == 0) {
        return EMPTY_SLICE;
    }

    try (GZIPInputStream gzipInputStream = new GZIPInputStream(input.getInput(), min(GZIP_BUFFER_SIZE, input.length()))) {
        byte[] buffer = new byte[uncompressedSize];
        int bytesRead = ByteStreams.read(gzipInputStream, buffer, 0, buffer.length);
        if (bytesRead != uncompressedSize) {
            throw new IllegalArgumentException(format("Invalid uncompressedSize for GZIP input. Expected %s, actual: %s", uncompressedSize, bytesRead));
        }
        // Verify we're at EOF and aren't truncating the input
        checkArgument(gzipInputStream.read() == -1, "Invalid uncompressedSize for GZIP input. Actual size exceeds %s bytes", uncompressedSize);
        return wrappedBuffer(buffer, 0, bytesRead);
    }
}
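This example relies on the fact that the SliceInput returned by getInput() is a java.io.InputStream, so it can be handed straight to stream-based consumers such as GZIPInputStream.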
Example 6
Source File: RcFileReader.java From presto with Apache License 2.0
public void setBuffers(Slice lengthsBuffer, Slice dataBuffer, int uncompressedDataSize)
{
    this.lengthsInput = lengthsBuffer.getInput();
    this.dataBuffer = dataBuffer;
    this.uncompressedDataSize = uncompressedDataSize;
    compressed = (decompressor != null);
    currentPosition = 0;
    currentOffset = 0;
    runLength = 0;
    lastValueLength = 0;
}
Example 7
Source File: JtsGeometrySerde.java From presto with Apache License 2.0
public static Geometry deserialize(Slice shape)
{
    requireNonNull(shape, "shape is null");
    BasicSliceInput input = shape.getInput();
    verify(input.available() > 0);
    GeometrySerializationType type = GeometrySerializationType.getForCode(input.readByte());
    return readGeometry(input, type);
}
Example 8
Source File: GeometrySerde.java From presto with Apache License 2.0
public static GeometrySerializationType deserializeType(Slice shape)
{
    requireNonNull(shape, "shape is null");
    BasicSliceInput input = shape.getInput();
    verify(input.available() > 0);
    return GeometrySerializationType.getForCode(input.readByte());
}
Example 9
Source File: GeometrySerde.java From presto with Apache License 2.0
public static OGCGeometry deserialize(Slice shape)
{
    requireNonNull(shape, "shape is null");
    BasicSliceInput input = shape.getInput();
    verify(input.available() > 0);
    int length = input.available() - 1;
    GeometrySerializationType type = GeometrySerializationType.getForCode(input.readByte());
    return readGeometry(input, shape, type, length);
}
Example 10
Source File: GeometrySerde.java From presto with Apache License 2.0
@Nullable
public static Envelope deserializeEnvelope(Slice shape)
{
    requireNonNull(shape, "shape is null");
    BasicSliceInput input = shape.getInput();
    verify(input.available() > 0);
    int length = input.available() - 1;
    GeometrySerializationType type = GeometrySerializationType.getForCode(input.readByte());
    return getEnvelope(input, type, length);
}
Example 11
Source File: PrimitiveColumnReader.java From presto with Apache License 2.0
private LevelReader buildLevelRLEReader(int maxLevel, Slice slice)
{
    if (maxLevel == 0) {
        return new LevelNullReader();
    }
    return new LevelRLEReader(new RunLengthBitPackingHybridDecoder(BytesUtils.getWidthFromMaxInt(maxLevel), slice.getInput()));
}
Example 12
Source File: ParquetColumnChunk.java From presto with Apache License 2.0
public ParquetColumnChunk(
        Optional<String> fileCreatedBy,
        ColumnChunkDescriptor descriptor,
        Slice data)
{
    this.fileCreatedBy = requireNonNull(fileCreatedBy, "fileCreatedBy is null");
    this.descriptor = descriptor;
    this.input = data.getInput();
}
Example 13
Source File: OrcInputStream.java From spliceengine with GNU Affero General Public License v3.0
private void advance()
        throws IOException
{
    if (compressedSliceInput == null || compressedSliceInput.remaining() == 0) {
        current = null;
        return;
    }

    // 3 byte header
    // NOTE: this must match BLOCK_HEADER_SIZE
    currentCompressedBlockOffset = toIntExact(compressedSliceInput.position());
    int b0 = compressedSliceInput.readUnsignedByte();
    int b1 = compressedSliceInput.readUnsignedByte();
    int b2 = compressedSliceInput.readUnsignedByte();

    boolean isUncompressed = (b0 & 0x01) == 1;
    int chunkLength = (b2 << 15) | (b1 << 7) | (b0 >>> 1);
    if (chunkLength < 0 || chunkLength > compressedSliceInput.remaining()) {
        throw new OrcCorruptionException(String.format("The chunkLength (%s) must not be negative or greater than remaining size (%s)", chunkLength, compressedSliceInput.remaining()));
    }

    Slice chunk = compressedSliceInput.readSlice(chunkLength);

    if (isUncompressed) {
        current = chunk.getInput();
    }
    else {
        int uncompressedSize;
        if (compressionKind == ZLIB) {
            uncompressedSize = decompressZip(chunk);
        }
        else {
            uncompressedSize = decompressSnappy(chunk);
        }

        current = Slices.wrappedBuffer(buffer, 0, uncompressedSize).getInput();
    }
}
Example 14
Source File: OrcReader.java From spliceengine with GNU Affero General Public License v3.0
public OrcReader(OrcDataSource orcDataSource, MetadataReader metadataReader, DataSize maxMergeDistance, DataSize maxReadSize)
        throws IOException
{
    orcDataSource = wrapWithCacheIfTiny(requireNonNull(orcDataSource, "orcDataSource is null"), maxMergeDistance);
    this.orcDataSource = orcDataSource;
    this.metadataReader = requireNonNull(metadataReader, "metadataReader is null");
    this.maxMergeDistance = requireNonNull(maxMergeDistance, "maxMergeDistance is null");
    this.maxReadSize = requireNonNull(maxReadSize, "maxReadSize is null");

    //
    // Read the file tail:
    //
    // variable: Footer
    // variable: Metadata
    // variable: PostScript - contains length of footer and metadata
    // 3 bytes: file magic "ORC"
    // 1 byte: postScriptSize = PostScript + Magic

    // figure out the size of the file using the option or filesystem
    long size = orcDataSource.getSize();
    if (size <= 0) {
        throw new OrcCorruptionException("Malformed ORC file %s. Invalid file size %s", orcDataSource, size);
    }

    // Read the tail of the file
    byte[] buffer = new byte[toIntExact(min(size, EXPECTED_FOOTER_SIZE))];
    orcDataSource.readFully(size - buffer.length, buffer);

    // get length of PostScript - last byte of the file
    int postScriptSize = buffer[buffer.length - SIZE_OF_BYTE] & 0xff;

    // make sure this is an ORC file and not an RCFile or something else
    verifyOrcFooter(orcDataSource, postScriptSize, buffer);

    // decode the post script
    int postScriptOffset = buffer.length - SIZE_OF_BYTE - postScriptSize;
    PostScript postScript = metadataReader.readPostScript(buffer, postScriptOffset, postScriptSize);

    // verify this is a supported version
    checkOrcVersion(orcDataSource, postScript.getVersion());

    // check compression codec is supported
    this.compressionKind = postScript.getCompression();

    this.hiveWriterVersion = postScript.getHiveWriterVersion();
    this.bufferSize = toIntExact(postScript.getCompressionBlockSize());

    int footerSize = toIntExact(postScript.getFooterLength());
    int metadataSize = toIntExact(postScript.getMetadataLength());

    // check if extra bytes need to be read
    Slice completeFooterSlice;
    int completeFooterSize = footerSize + metadataSize + postScriptSize + SIZE_OF_BYTE;
    if (completeFooterSize > buffer.length) {
        // allocate a new buffer large enough for the complete footer
        byte[] newBuffer = new byte[completeFooterSize];
        completeFooterSlice = Slices.wrappedBuffer(newBuffer);

        // initial read was not large enough, so read missing section
        orcDataSource.readFully(size - completeFooterSize, newBuffer, 0, completeFooterSize - buffer.length);

        // copy already read bytes into the new buffer
        completeFooterSlice.setBytes(completeFooterSize - buffer.length, buffer);
    }
    else {
        // footer is already in the bytes in buffer, just adjust position, length
        completeFooterSlice = Slices.wrappedBuffer(buffer, buffer.length - completeFooterSize, completeFooterSize);
    }

    // read metadata
    Slice metadataSlice = completeFooterSlice.slice(0, metadataSize);
    try (InputStream metadataInputStream = new OrcInputStream(orcDataSource.toString(), metadataSlice.getInput(), compressionKind, bufferSize, new AggregatedMemoryContext())) {
        this.metadata = metadataReader.readMetadata(hiveWriterVersion, metadataInputStream);
    }

    // read footer
    Slice footerSlice = completeFooterSlice.slice(metadataSize, footerSize);
    try (InputStream footerInputStream = new OrcInputStream(orcDataSource.toString(), footerSlice.getInput(), compressionKind, bufferSize, new AggregatedMemoryContext())) {
        this.footer = metadataReader.readFooter(hiveWriterVersion, footerInputStream);
    }
}