Java Code Examples for org.apache.flink.runtime.io.network.buffer.BufferConsumer#build()
The following examples show how to use
org.apache.flink.runtime.io.network.buffer.BufferConsumer#build() .
You can vote up the ones you like or vote down the ones you don't like,
and you can go to the original project or source file by following the links above each example. You may also check out the related API usage in the sidebar.
Example 1
Source File: AbstractCollectingResultPartitionWriter.java From Flink-CEPplus with Apache License 2.0 | 6 votes |
/**
 * Drains the queued {@code BufferConsumer}s, deserializing the data produced by each one.
 * Stops at the first consumer that is not yet finished; finished consumers are popped
 * off the queue and closed. Every built buffer is recycled regardless of outcome.
 */
private void processBufferConsumers() throws IOException {
	while (!bufferConsumers.isEmpty()) {
		final BufferConsumer head = bufferConsumers.peek();
		final Buffer built = head.build();
		try {
			deserializeBuffer(built);
			if (!head.isFinished()) {
				// the head consumer may still produce more data later; revisit it next time
				break;
			}
			bufferConsumers.pop().close();
		} finally {
			built.recycleBuffer();
		}
	}
}
Example 2
Source File: BoundedBlockingSubpartition.java From flink with Apache License 2.0 | 6 votes |
/**
 * Builds the given consumer's buffer, writes it to the bounded data store, updates
 * the write counters, and unconditionally recycles the buffer and closes the consumer.
 */
private void writeAndCloseBufferConsumer(BufferConsumer bufferConsumer) throws IOException {
	try {
		final Buffer built = bufferConsumer.build();
		try {
			data.writeBuffer(built);
			numBuffersAndEventsWritten++;
			if (built.isBuffer()) {
				// count real data buffers separately from events
				numDataBuffersWritten++;
			}
		} finally {
			built.recycleBuffer();
		}
	} finally {
		bufferConsumer.close();
	}
}
Example 3
Source File: AbstractCollectingResultPartitionWriter.java From flink with Apache License 2.0 | 6 votes |
/**
 * Drains the queued {@code BufferConsumer}s, deserializing the data produced by each one.
 * Stops at the first consumer that is not yet finished; finished consumers are popped
 * off the queue and closed. Every built buffer is recycled regardless of outcome.
 */
private void processBufferConsumers() throws IOException {
	while (!bufferConsumers.isEmpty()) {
		final BufferConsumer head = bufferConsumers.peek();
		final Buffer built = head.build();
		try {
			deserializeBuffer(built);
			if (!head.isFinished()) {
				// the head consumer may still produce more data later; revisit it next time
				break;
			}
			bufferConsumers.pop().close();
		} finally {
			built.recycleBuffer();
		}
	}
}
Example 4
Source File: LocalInputChannel.java From flink with Apache License 2.0 | 6 votes |
@Override public boolean notifyPriorityEvent(BufferConsumer eventBufferConsumer) throws IOException { if (inputGate.getBufferReceivedListener() == null) { // in rare cases and very low checkpointing intervals, we may receive the first barrier, before setting // up CheckpointedInputGate return false; } Buffer buffer = eventBufferConsumer.build(); try { CheckpointBarrier event = parseCheckpointBarrierOrNull(buffer); if (event == null) { throw new IllegalStateException("Currently only checkpoint barriers are known priority events"); } else if (event.isCheckpoint()) { inputGate.getBufferReceivedListener().notifyBarrierReceived(event, channelInfo); } } finally { buffer.recycleBuffer(); } // already processed return true; }
Example 5
Source File: ChannelStateSerializerImplTest.java From flink with Apache License 2.0 | 6 votes |
/**
 * Verifies that {@code readData} copies an entire input stream into a
 * {@code BufferBuilder} without finishing it, and that the resulting buffer's
 * contents match the original bytes exactly.
 */
@Test
public void testReadToBufferBuilder() throws IOException {
	final byte[] expected = generateData(100);
	final BufferBuilder builder = new BufferBuilder(
		HeapMemorySegment.FACTORY.allocateUnpooledSegment(expected.length, null),
		FreeingBufferRecycler.INSTANCE);
	final BufferConsumer consumer = builder.createBufferConsumer();

	new ChannelStateSerializerImpl().readData(new ByteArrayInputStream(expected), wrap(builder), Integer.MAX_VALUE);

	// readData must not finish the builder itself
	assertFalse(builder.isFinished());

	builder.finish();
	final Buffer result = consumer.build();
	assertEquals(expected.length, result.readableBytes());

	final byte[] actual = new byte[result.readableBytes()];
	result.asByteBuf().readBytes(actual);
	assertArrayEquals(expected, actual);
}
Example 6
Source File: AbstractCollectingResultPartitionWriter.java From flink with Apache License 2.0 | 6 votes |
/**
 * Drains the queued {@code BufferConsumer}s, deserializing the data produced by each one.
 * Stops at the first consumer that is not yet finished; finished consumers are popped
 * off the queue and closed. Every built buffer is recycled regardless of outcome.
 */
private void processBufferConsumers() throws IOException {
	while (!bufferConsumers.isEmpty()) {
		final BufferConsumer head = bufferConsumers.peek();
		final Buffer built = head.build();
		try {
			deserializeBuffer(built);
			if (!head.isFinished()) {
				// the head consumer may still produce more data later; revisit it next time
				break;
			}
			bufferConsumers.pop().close();
		} finally {
			built.recycleBuffer();
		}
	}
}
Example 7
Source File: SpillableSubpartition.java From Flink-CEPplus with Apache License 2.0 | 5 votes |
/**
 * Spills the finished buffer consumers at the head of the queue to the spill writer.
 *
 * <p>Finished (or force-finished) consumers are fully spilled, closed, and removed.
 * On hitting an unfinished consumer, any data it has produced so far is still spilled
 * (that slice cannot be obtained again from a later {@code build()}), and the method
 * returns early without removing the consumer.
 *
 * @param forceFinishRemainingBuffers if true, treat every queued consumer as finished
 *     (used while the subpartition itself is being finished)
 * @return the number of bytes spilled by this call
 * @throws IOException if writing to the spill writer fails
 */
@VisibleForTesting
long spillFinishedBufferConsumers(boolean forceFinishRemainingBuffers) throws IOException {
	long spilledBytes = 0;
	while (!buffers.isEmpty()) {
		BufferConsumer bufferConsumer = buffers.getFirst();
		Buffer buffer = bufferConsumer.build();
		updateStatistics(buffer);
		int bufferSize = buffer.getSize();
		spilledBytes += bufferSize;
		// NOTE we may be in the process of finishing the subpartition where any buffer should
		// be treated as if it was finished!
		if (bufferConsumer.isFinished() || forceFinishRemainingBuffers) {
			if (bufferSize > 0) {
				// ownership of the buffer passes to the spill writer; no recycle here
				spillWriter.writeBlock(buffer);
			} else {
				// If we skip a buffer for the spill writer, we need to adapt the backlog accordingly
				decreaseBuffersInBacklog(buffer);
				buffer.recycleBuffer();
			}
			bufferConsumer.close();
			buffers.poll();
		} else {
			// If there is already data, we need to spill it anyway, since we do not get this
			// slice from the buffer consumer again during the next build.
			// BEWARE: by doing so, we increase the actual number of buffers in the spill writer!
			if (bufferSize > 0) {
				spillWriter.writeBlock(buffer);
				increaseBuffersInBacklog(bufferConsumer);
			} else {
				buffer.recycleBuffer();
			}
			// unfinished head consumer stays queued; stop spilling for now
			return spilledBytes;
		}
	}
	return spilledBytes;
}
Example 8
Source File: InputGateConcurrentTest.java From Flink-CEPplus with Apache License 2.0 | 5 votes |
/**
 * Builds the consumer's buffer and feeds it to the channel with the next sequence
 * number. Only finished consumers are supported; the consumer is always closed.
 */
@Override
void addBufferConsumer(BufferConsumer bufferConsumer) throws Exception {
	try {
		final Buffer built = bufferConsumer.build();
		checkState(bufferConsumer.isFinished(), "Handling of non finished buffers is not yet implemented");
		channel.onBuffer(built, seq++, -1);
	} finally {
		bufferConsumer.close();
	}
}
Example 9
Source File: BoundedBlockingSubpartition.java From flink with Apache License 2.0 | 5 votes |
/**
 * Builds the consumer's buffer, compresses it when possible, writes it to the bounded
 * data store, updates the write counters, and unconditionally recycles the buffer and
 * closes the consumer.
 */
private void writeAndCloseBufferConsumer(BufferConsumer bufferConsumer) throws IOException {
	try {
		final Buffer built = bufferConsumer.build();
		try {
			if (canBeCompressed(built)) {
				final Buffer compressed = parent.bufferCompressor.compressToIntermediateBuffer(built);
				data.writeBuffer(compressed);
				if (compressed != built) {
					// the compressor handed us a distinct intermediate buffer; release it
					compressed.recycleBuffer();
				}
			} else {
				data.writeBuffer(built);
			}
			numBuffersAndEventsWritten++;
			if (built.isBuffer()) {
				// count real data buffers separately from events
				numDataBuffersWritten++;
			}
		} finally {
			built.recycleBuffer();
		}
	} finally {
		bufferConsumer.close();
	}
}
Example 10
Source File: BroadcastRecordWriterTest.java From flink with Apache License 2.0 | 5 votes |
/**
 * Builds and closes the first buffer consumer added to the given subpartition,
 * asserting that the produced buffer has the expected size before recycling it.
 */
public void closeConsumer(KeepingPartitionWriter partitionWriter, int subpartitionIndex, int expectedSize) {
	final BufferConsumer consumer = partitionWriter.getAddedBufferConsumers(subpartitionIndex).get(0);
	final Buffer built = consumer.build();
	consumer.close();
	assertEquals(expectedSize, built.getSize());
	built.recycleBuffer();
}
Example 11
Source File: StreamTaskNetworkInputTest.java From flink with Apache License 2.0 | 5 votes |
/**
 * Creates a {@code BufferOrEvent} carrying two serialized long records (42 and 44),
 * addressed to input channel (0, 0).
 */
private BufferOrEvent createDataBuffer() throws IOException {
	final BufferBuilder builder = BufferBuilderTestUtils.createEmptyBufferBuilder(PAGE_SIZE);
	final BufferConsumer consumer = builder.createBufferConsumer();
	serializeRecord(42L, builder);
	serializeRecord(44L, builder);
	return new BufferOrEvent(consumer.build(), new InputChannelInfo(0, 0), false);
}
Example 12
Source File: PipelinedSubpartition.java From Flink-CEPplus with Apache License 2.0 | 4 votes |
/**
 * Polls the next readable buffer from this subpartition, or returns {@code null}
 * when no data with readable bytes is currently available.
 *
 * <p>Runs entirely under the {@code buffers} lock. Skips over empty slices: a built
 * buffer with zero readable bytes is recycled immediately, and the loop continues
 * only if its consumer was finished (an unfinished empty consumer means no more
 * data is available right now).
 */
@Nullable
BufferAndBacklog pollBuffer() {
	synchronized (buffers) {
		Buffer buffer = null;
		if (buffers.isEmpty()) {
			flushRequested = false;
		}
		while (!buffers.isEmpty()) {
			BufferConsumer bufferConsumer = buffers.peek();
			buffer = bufferConsumer.build();
			// invariant: only the single queue element may be unfinished
			checkState(bufferConsumer.isFinished() || buffers.size() == 1,
				"When there are multiple buffers, an unfinished bufferConsumer can not be at the head of the buffers queue.");
			if (buffers.size() == 1) {
				// turn off flushRequested flag if we drained all of the available data
				flushRequested = false;
			}
			if (bufferConsumer.isFinished()) {
				buffers.pop().close();
				decreaseBuffersInBacklogUnsafe(bufferConsumer.isBuffer());
			}
			if (buffer.readableBytes() > 0) {
				break;
			}
			// empty slice: discard it and decide whether another attempt can yield data
			buffer.recycleBuffer();
			buffer = null;
			if (!bufferConsumer.isFinished()) {
				break;
			}
		}
		if (buffer == null) {
			return null;
		}
		updateStatistics(buffer);
		// Do not report last remaining buffer on buffers as available to read (assuming it's unfinished).
		// It will be reported for reading either on flush or when the number of buffers in the queue
		// will be 2 or more.
		return new BufferAndBacklog(
			buffer,
			isAvailableUnsafe(),
			getBuffersInBacklog(),
			nextBufferIsEventUnsafe());
	}
}
Example 13
Source File: PipelinedSubpartition.java From flink with Apache License 2.0 | 4 votes |
/**
 * Polls the next readable buffer from this subpartition, or returns {@code null}
 * when no data with readable bytes is currently available.
 *
 * <p>Runs entirely under the {@code buffers} lock. Skips over empty slices: a built
 * buffer with zero readable bytes is recycled immediately, and the loop continues
 * only if its consumer was finished (an unfinished empty consumer means no more
 * data is available right now).
 */
@Nullable
BufferAndBacklog pollBuffer() {
	synchronized (buffers) {
		Buffer buffer = null;
		if (buffers.isEmpty()) {
			flushRequested = false;
		}
		while (!buffers.isEmpty()) {
			BufferConsumer bufferConsumer = buffers.peek();
			buffer = bufferConsumer.build();
			// invariant: only the single queue element may be unfinished
			checkState(bufferConsumer.isFinished() || buffers.size() == 1,
				"When there are multiple buffers, an unfinished bufferConsumer can not be at the head of the buffers queue.");
			if (buffers.size() == 1) {
				// turn off flushRequested flag if we drained all of the available data
				flushRequested = false;
			}
			if (bufferConsumer.isFinished()) {
				buffers.pop().close();
				decreaseBuffersInBacklogUnsafe(bufferConsumer.isBuffer());
			}
			if (buffer.readableBytes() > 0) {
				break;
			}
			// empty slice: discard it and decide whether another attempt can yield data
			buffer.recycleBuffer();
			buffer = null;
			if (!bufferConsumer.isFinished()) {
				break;
			}
		}
		if (buffer == null) {
			return null;
		}
		updateStatistics(buffer);
		// Do not report last remaining buffer on buffers as available to read (assuming it's unfinished).
		// It will be reported for reading either on flush or when the number of buffers in the queue
		// will be 2 or more.
		return new BufferAndBacklog(
			buffer,
			isAvailableUnsafe(),
			getBuffersInBacklog(),
			nextBufferIsEventUnsafe());
	}
}
Example 14
Source File: PipelinedSubpartition.java From flink with Apache License 2.0 | 4 votes |
/**
 * Polls the next readable buffer from this subpartition, or returns {@code null}
 * when no readable data is currently available or the subpartition is blocked by a
 * checkpoint.
 *
 * <p>Runs entirely under the {@code buffers} lock. Skips over empty slices (recycled
 * immediately). If the returned buffer's data type blocks the upstream (an aligned
 * checkpoint barrier), the subpartition marks itself blocked so subsequent polls
 * return {@code null} until it is resumed.
 */
@Nullable
BufferAndBacklog pollBuffer() {
	synchronized (buffers) {
		if (isBlockedByCheckpoint) {
			return null;
		}
		Buffer buffer = null;
		if (buffers.isEmpty()) {
			flushRequested = false;
		}
		while (!buffers.isEmpty()) {
			BufferConsumer bufferConsumer = buffers.peek();
			buffer = bufferConsumer.build();
			// invariant: only the single queue element may be unfinished
			checkState(bufferConsumer.isFinished() || buffers.size() == 1,
				"When there are multiple buffers, an unfinished bufferConsumer can not be at the head of the buffers queue.");
			if (buffers.size() == 1) {
				// turn off flushRequested flag if we drained all of the available data
				flushRequested = false;
			}
			if (bufferConsumer.isFinished()) {
				buffers.pop().close();
				decreaseBuffersInBacklogUnsafe(bufferConsumer.isBuffer());
			}
			if (buffer.readableBytes() > 0) {
				break;
			}
			// empty slice: discard it and decide whether another attempt can yield data
			buffer.recycleBuffer();
			buffer = null;
			if (!bufferConsumer.isFinished()) {
				break;
			}
		}
		if (buffer == null) {
			return null;
		}
		if (buffer.getDataType().isBlockingUpstream()) {
			// stop serving data until the checkpoint unblocks this subpartition
			isBlockedByCheckpoint = true;
		}
		updateStatistics(buffer);
		// Do not report last remaining buffer on buffers as available to read (assuming it's unfinished).
		// It will be reported for reading either on flush or when the number of buffers in the queue
		// will be 2 or more.
		return new BufferAndBacklog(
			buffer,
			isDataAvailableUnsafe(),
			getBuffersInBacklog(),
			isEventAvailableUnsafe());
	}
}