Java Code Examples for org.apache.flink.runtime.io.network.buffer.Buffer#DataType
The following examples show how to use org.apache.flink.runtime.io.network.buffer.Buffer#DataType.
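The examples below mostly branch on whether a buffer carries serialized records (DATA_BUFFER) or an event such as a checkpoint barrier (EVENT_BUFFER, ALIGNED_EXACTLY_ONCE_CHECKPOINT_BARRIER). As a minimal sketch of that pattern, using only Buffer#getDataType() and DataType#isBuffer(), which appear in the examples; the handleData/handleEvent helpers are hypothetical placeholders, not Flink code:

import org.apache.flink.runtime.io.network.buffer.Buffer;

class DataTypeDispatchSketch {

    // Branch on the data type, as Example 3 does when choosing a pooled vs. un-pooled buffer.
    static void dispatch(Buffer buffer) {
        Buffer.DataType dataType = buffer.getDataType();
        if (dataType.isBuffer()) {
            handleData(buffer);   // regular data buffer with serialized records
        } else {
            handleEvent(buffer);  // event buffer, e.g. a checkpoint barrier
        }
    }

    private static void handleData(Buffer buffer) { /* hypothetical placeholder */ }

    private static void handleEvent(Buffer buffer) { /* hypothetical placeholder */ }
}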
Example 1
Source File: NettyMessage.java From flink with Apache License 2.0 | 6 votes |
private BufferResponse(
        @Nullable Buffer buffer,
        Buffer.DataType dataType,
        boolean isCompressed,
        int sequenceNumber,
        InputChannelID receiverId,
        int backlog,
        int bufferSize) {
    this.buffer = buffer;
    this.dataType = dataType;
    this.isCompressed = isCompressed;
    this.sequenceNumber = sequenceNumber;
    this.receiverId = checkNotNull(receiverId);
    this.backlog = backlog;
    this.bufferSize = bufferSize;
}
Example 2
Source File: BufferReaderWriterUtil.java From flink with Apache License 2.0 | 6 votes |
@Nullable
static Buffer sliceNextBuffer(ByteBuffer memory) {
    final int remaining = memory.remaining();

    // we only check the correct case where data is exhausted
    // all other cases can only occur if our write logic is wrong and will already throw
    // buffer underflow exceptions which will cause the read to fail.
    if (remaining == 0) {
        return null;
    }

    final boolean isEvent = memory.getShort() == HEADER_VALUE_IS_EVENT;
    final boolean isCompressed = memory.getShort() == BUFFER_IS_COMPRESSED;
    final int size = memory.getInt();

    memory.limit(memory.position() + size);
    ByteBuffer buf = memory.slice();
    memory.position(memory.limit());
    memory.limit(memory.capacity());

    MemorySegment memorySegment = MemorySegmentFactory.wrapOffHeapMemory(buf);

    Buffer.DataType dataType =
            isEvent ? Buffer.DataType.EVENT_BUFFER : Buffer.DataType.DATA_BUFFER;
    return new NetworkBuffer(memorySegment, FreeingBufferRecycler.INSTANCE, dataType, isCompressed, size);
}
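A possible caller, shown only as a sketch: slice buffers out of one memory region until it is exhausted. The drainRegion method and its logging are illustrative assumptions, not Flink code; only sliceNextBuffer and the Buffer accessors come from the example above.

// Hypothetical caller: drain a memory region that was written with the matching header layout
// (short isEvent, short isCompressed, int size, then the payload) until sliceNextBuffer returns null.
static void drainRegion(ByteBuffer region) {
    Buffer buffer;
    while ((buffer = BufferReaderWriterUtil.sliceNextBuffer(region)) != null) {
        System.out.println(buffer.getDataType() + ", " + buffer.getSize() + " bytes"
                + (buffer.isCompressed() ? " (compressed)" : ""));
        buffer.recycleBuffer(); // release the slice once it has been consumed
    }
}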
Example 3
Source File: NettyMessage.java From flink with Apache License 2.0 | 5 votes |
/**
 * Parses the message header part and composes a new BufferResponse with an empty data buffer.
 * The data buffer will be filled in later.
 *
 * @param messageHeader the serialized message header.
 * @param bufferAllocator the allocator for network buffer.
 * @return a BufferResponse object with the header parsed and the data buffer to fill in later.
 *     The data buffer will be null if the target channel has been released or the buffer size is 0.
 */
static BufferResponse readFrom(ByteBuf messageHeader, NetworkBufferAllocator bufferAllocator) {
    InputChannelID receiverId = InputChannelID.fromByteBuf(messageHeader);
    int sequenceNumber = messageHeader.readInt();
    int backlog = messageHeader.readInt();
    Buffer.DataType dataType = Buffer.DataType.values()[messageHeader.readByte()];
    boolean isCompressed = messageHeader.readBoolean();
    int size = messageHeader.readInt();

    Buffer dataBuffer = null;
    if (size != 0) {
        if (dataType.isBuffer()) {
            dataBuffer = bufferAllocator.allocatePooledNetworkBuffer(receiverId);
        } else {
            dataBuffer = bufferAllocator.allocateUnPooledNetworkBuffer(size, dataType);
        }
    }

    if (dataBuffer != null) {
        dataBuffer.setCompressed(isCompressed);
    }

    return new BufferResponse(
            dataBuffer, dataType, isCompressed, sequenceNumber, receiverId, backlog, size);
}
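For orientation, the header fields are consumed in a fixed order: receiver ID, sequence number, backlog, a one-byte data type ordinal, the compression flag, and the buffer size. The sketch below only mirrors that order for illustration; it is not NettyMessage's actual write path, and InputChannelID#writeTo is an assumption here.

// Illustration only: write a header in the same field order that readFrom(...) parses above.
// Not Flink's serializer; InputChannelID#writeTo is assumed.
static void writeHeaderSketch(
        ByteBuf target,
        InputChannelID receiverId,
        int sequenceNumber,
        int backlog,
        Buffer.DataType dataType,
        boolean isCompressed,
        int size) {
    receiverId.writeTo(target);
    target.writeInt(sequenceNumber);
    target.writeInt(backlog);
    target.writeByte(dataType.ordinal()); // read back via Buffer.DataType.values()[readByte()]
    target.writeBoolean(isCompressed);
    target.writeInt(size);
}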
Example 4
Source File: BufferReaderWriterUtil.java From flink with Apache License 2.0 | 5 votes |
@Nullable
static Buffer readFromByteChannel(
        FileChannel channel,
        ByteBuffer headerBuffer,
        MemorySegment memorySegment,
        BufferRecycler bufferRecycler) throws IOException {

    headerBuffer.clear();
    if (!tryReadByteBuffer(channel, headerBuffer)) {
        return null;
    }
    headerBuffer.flip();

    final ByteBuffer targetBuf;
    final boolean isEvent;
    final boolean isCompressed;
    final int size;

    try {
        isEvent = headerBuffer.getShort() == HEADER_VALUE_IS_EVENT;
        isCompressed = headerBuffer.getShort() == BUFFER_IS_COMPRESSED;
        size = headerBuffer.getInt();
        targetBuf = memorySegment.wrap(0, size);
    } catch (BufferUnderflowException | IllegalArgumentException e) {
        // buffer underflow if header buffer is undersized
        // IllegalArgumentException if size is outside memory segment size
        throwCorruptDataException();
        return null; // silence compiler
    }

    readByteBufferFully(channel, targetBuf);

    Buffer.DataType dataType =
            isEvent ? Buffer.DataType.EVENT_BUFFER : Buffer.DataType.DATA_BUFFER;
    return new NetworkBuffer(memorySegment, bufferRecycler, dataType, isCompressed, size);
}
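A sketch of how such a reader might be driven over a spilled file; the 8-byte header buffer (two shorts plus one int, matching the parse above) and the surrounding method are assumptions, not Flink code. Note that the same MemorySegment is re-wrapped on every call, so each buffer must be fully consumed before the next read.

// Hypothetical driver: read buffers back from a file channel until EOF.
static void readAll(FileChannel channel, MemorySegment segment, BufferRecycler recycler)
        throws IOException {
    // 2 shorts + 1 int = 8 bytes, matching the header parsed in readFromByteChannel;
    // the byte order must match whatever wrote the file (default order assumed here).
    ByteBuffer headerBuffer = ByteBuffer.allocateDirect(8);

    Buffer buffer;
    while ((buffer = BufferReaderWriterUtil.readFromByteChannel(channel, headerBuffer, segment, recycler)) != null) {
        // consume the buffer here; it wraps the shared segment, so finish with it
        // (and recycle it) before the next iteration overwrites the segment
        buffer.recycleBuffer();
    }
}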
Example 5
Source File: PartitionRequestQueueTest.java From flink with Apache License 2.0 | 5 votes |
/**
 * Tests {@link PartitionRequestQueue#enqueueAvailableReader(NetworkSequenceViewReader)},
 * verifying the reader would be enqueued in the pipeline after resuming data consumption if
 * there are credit and data available.
 */
@Test
public void testEnqueueReaderByResumingConsumption() throws Exception {
    PipelinedSubpartition subpartition = PipelinedSubpartitionTest.createPipelinedSubpartition();
    Buffer.DataType dataType1 = Buffer.DataType.ALIGNED_EXACTLY_ONCE_CHECKPOINT_BARRIER;
    Buffer.DataType dataType2 = Buffer.DataType.DATA_BUFFER;
    subpartition.add(createEventBufferConsumer(4096, dataType1));
    subpartition.add(createEventBufferConsumer(4096, dataType2));

    BufferAvailabilityListener bufferAvailabilityListener = new NoOpBufferAvailablityListener();
    PipelinedSubpartitionView view = subpartition.createReadView(bufferAvailabilityListener);
    ResultPartitionProvider partitionProvider = (partitionId, index, availabilityListener) -> view;

    InputChannelID receiverId = new InputChannelID();
    PartitionRequestQueue queue = new PartitionRequestQueue();
    CreditBasedSequenceNumberingViewReader reader =
            new CreditBasedSequenceNumberingViewReader(receiverId, 0, queue);
    EmbeddedChannel channel = new EmbeddedChannel(queue);

    reader.requestSubpartitionView(partitionProvider, new ResultPartitionID(), 0);
    queue.notifyReaderCreated(reader);

    // we have adequate credits
    reader.addCredit(Integer.MAX_VALUE);
    assertTrue(reader.isAvailable());

    reader.notifyDataAvailable();
    channel.runPendingTasks();
    assertFalse(reader.isAvailable());
    assertEquals(1, subpartition.unsynchronizedGetNumberOfQueuedBuffers());

    queue.addCreditOrResumeConsumption(receiverId, NetworkSequenceViewReader::resumeConsumption);
    assertFalse(reader.isAvailable());
    assertEquals(0, subpartition.unsynchronizedGetNumberOfQueuedBuffers());

    Object data1 = channel.readOutbound();
    assertEquals(dataType1, ((NettyMessage.BufferResponse) data1).buffer.getDataType());
    Object data2 = channel.readOutbound();
    assertEquals(dataType2, ((NettyMessage.BufferResponse) data2).buffer.getDataType());
}
Example 6
Source File: NettyMessageClientDecoderDelegateTest.java From flink with Apache License 2.0 | 5 votes |
private void addBufferResponse(
        List<NettyMessage> messages,
        InputChannelID inputChannelId,
        Buffer.DataType dataType,
        int bufferSize,
        int seqNumber) {

    Buffer buffer = createDataBuffer(bufferSize, dataType);
    messages.add(new BufferResponse(buffer, seqNumber, inputChannelId, 1));
}
Example 7
Source File: NettyMessageClientDecoderDelegateTest.java From flink with Apache License 2.0 | 5 votes |
private Buffer createDataBuffer(int size, Buffer.DataType dataType) {
    MemorySegment segment = MemorySegmentFactory.allocateUnpooledSegment(size);
    NetworkBuffer buffer = new NetworkBuffer(segment, FreeingBufferRecycler.INSTANCE, dataType);
    for (int i = 0; i < size / 4; ++i) {
        buffer.writeInt(i);
    }
    return buffer;
}
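A matching check one might write in the same test (hypothetical, not from the Flink test class): read the ints back through the buffer's read-only NIO view.

// Hypothetical assertion helper: verify the pattern written by createDataBuffer(size, dataType).
private void verifyDataBuffer(Buffer buffer, int size) {
    ByteBuffer content = buffer.getNioBufferReadable();
    for (int i = 0; i < size / 4; ++i) {
        assertEquals(i, content.getInt());
    }
}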
Example 8
Source File: NetworkBufferAllocator.java From flink with Apache License 2.0 | 3 votes |
/**
 * Allocates an un-pooled network buffer with the specific size.
 *
 * @param size The requested buffer size.
 * @param dataType The data type this buffer represents.
 * @return The un-pooled network buffer.
 */
Buffer allocateUnPooledNetworkBuffer(int size, Buffer.DataType dataType) {
    byte[] byteArray = new byte[size];
    MemorySegment memSeg = MemorySegmentFactory.wrap(byteArray);
    return new NetworkBuffer(memSeg, FreeingBufferRecycler.INSTANCE, dataType);
}
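As Example 3 shows, this un-pooled path is taken for non-data buffers (events). A hypothetical usage sketch; toEventBuffer and serializedEvent are assumptions for illustration, only the allocator call and Buffer#asByteBuf come from the examples.

// Hypothetical usage: copy a serialized event into an un-pooled buffer of exactly its size.
Buffer toEventBuffer(NetworkBufferAllocator allocator, ByteBuffer serializedEvent) {
    Buffer eventBuffer = allocator.allocateUnPooledNetworkBuffer(
            serializedEvent.remaining(), Buffer.DataType.EVENT_BUFFER);
    eventBuffer.asByteBuf().writeBytes(serializedEvent);
    return eventBuffer;
}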