Java Code Examples for org.apache.flink.runtime.io.network.buffer.BufferBuilder#createBufferConsumer()
The following examples show how to use
org.apache.flink.runtime.io.network.buffer.BufferBuilder#createBufferConsumer() .
You can vote up the ones you like or vote down the ones you don't like,
and go to the original project or source file by following the links above each example. You may also check out the related API usage in the sidebar.
Example 1
Source File: BroadcastRecordWriter.java From flink with Apache License 2.0 | 6 votes |
/** * The request could be from broadcast or non-broadcast modes like {@link #randomEmit(IOReadableWritable)}. * * <p>For non-broadcast, the created {@link BufferConsumer} is only for the target channel. * * <p>For broadcast, all the channels share the same requested {@link BufferBuilder} and the created * {@link BufferConsumer} is copied for every channel. */ @Override public BufferBuilder requestNewBufferBuilder(int targetChannel) throws IOException, InterruptedException { checkState(bufferBuilder == null || bufferBuilder.isFinished()); BufferBuilder builder = super.requestNewBufferBuilder(targetChannel); if (randomTriggered) { addBufferConsumer(randomTriggeredConsumer = builder.createBufferConsumer(), targetChannel); } else { try (BufferConsumer bufferConsumer = builder.createBufferConsumer()) { for (int channel = 0; channel < numberOfChannels; channel++) { addBufferConsumer(bufferConsumer.copy(), channel); } } } bufferBuilder = builder; return builder; }
Example 2
Source File: PipelinedSubpartition.java From flink with Apache License 2.0 | 6 votes |
@Override public void readRecoveredState(ChannelStateReader stateReader) throws IOException, InterruptedException { boolean recycleBuffer = true; for (ReadResult readResult = ReadResult.HAS_MORE_DATA; readResult == ReadResult.HAS_MORE_DATA;) { BufferBuilder bufferBuilder = parent.getBufferPool().requestBufferBuilderBlocking(subpartitionInfo.getSubPartitionIdx()); BufferConsumer bufferConsumer = bufferBuilder.createBufferConsumer(); try { readResult = stateReader.readOutputData(subpartitionInfo, bufferBuilder); // check whether there are some states data filled in this time if (bufferConsumer.isDataAvailable()) { add(bufferConsumer, false, false); recycleBuffer = false; bufferBuilder.finish(); } } finally { if (recycleBuffer) { bufferConsumer.close(); } } } }
Example 3
Source File: ChannelStateSerializerImplTest.java From flink with Apache License 2.0 | 6 votes |
@Test public void testReadToBufferBuilder() throws IOException { byte[] data = generateData(100); BufferBuilder bufferBuilder = new BufferBuilder(HeapMemorySegment.FACTORY.allocateUnpooledSegment(data.length, null), FreeingBufferRecycler.INSTANCE); BufferConsumer bufferConsumer = bufferBuilder.createBufferConsumer(); new ChannelStateSerializerImpl().readData(new ByteArrayInputStream(data), wrap(bufferBuilder), Integer.MAX_VALUE); assertFalse(bufferBuilder.isFinished()); bufferBuilder.finish(); Buffer buffer = bufferConsumer.build(); assertEquals(data.length, buffer.readableBytes()); byte[] actual = new byte[buffer.readableBytes()]; buffer.asByteBuf().readBytes(actual); assertArrayEquals(data, actual); }
Example 4
Source File: SpanningRecordSerializationTest.java From Flink-CEPplus with Apache License 2.0 | 5 votes |
private static BufferAndSerializerResult setNextBufferForSerializer( RecordSerializer<SerializationTestType> serializer, int segmentSize) throws IOException { // create a bufferBuilder with some random starting offset to properly test handling buffer slices in the // deserialization code. int startingOffset = segmentSize > 2 ? RANDOM.nextInt(segmentSize / 2) : 0; BufferBuilder bufferBuilder = createFilledBufferBuilder(segmentSize + startingOffset, startingOffset); BufferConsumer bufferConsumer = bufferBuilder.createBufferConsumer(); bufferConsumer.build().recycleBuffer(); return new BufferAndSerializerResult( bufferBuilder, bufferConsumer, serializer.copyToBufferBuilder(bufferBuilder)); }
Example 5
Source File: LocalInputChannelTest.java From Flink-CEPplus with Apache License 2.0 | 5 votes |
@Override public BufferConsumerAndChannel getNextBufferConsumer() throws Exception { if (channelIndexes.size() > 0) { final int channelIndex = channelIndexes.remove(0); BufferBuilder bufferBuilder = bufferProvider.requestBufferBuilderBlocking(); bufferBuilder.appendAndCommit(ByteBuffer.wrap(new byte[4])); bufferBuilder.finish(); return new BufferConsumerAndChannel(bufferBuilder.createBufferConsumer(), channelIndex); } return null; }
Example 6
Source File: SpanningRecordSerializationTest.java From flink with Apache License 2.0 | 5 votes |
private static BufferAndSerializerResult setNextBufferForSerializer( RecordSerializer<SerializationTestType> serializer, int segmentSize) throws IOException { // create a bufferBuilder with some random starting offset to properly test handling buffer slices in the // deserialization code. int startingOffset = segmentSize > 2 ? RANDOM.nextInt(segmentSize / 2) : 0; BufferBuilder bufferBuilder = createFilledBufferBuilder(segmentSize + startingOffset, startingOffset); BufferConsumer bufferConsumer = bufferBuilder.createBufferConsumer(); bufferConsumer.build().recycleBuffer(); return new BufferAndSerializerResult( bufferBuilder, bufferConsumer, serializer.copyToBufferBuilder(bufferBuilder)); }
Example 7
Source File: LocalInputChannelTest.java From flink with Apache License 2.0 | 5 votes |
@Override public BufferConsumerAndChannel getNextBufferConsumer() throws Exception { if (channelIndexes.size() > 0) { final int channelIndex = channelIndexes.remove(0); BufferBuilder bufferBuilder = bufferProvider.requestBufferBuilderBlocking(); bufferBuilder.appendAndCommit(ByteBuffer.wrap(new byte[4])); bufferBuilder.finish(); return new BufferConsumerAndChannel(bufferBuilder.createBufferConsumer(), channelIndex); } return null; }
Example 8
Source File: SpanningRecordSerializationTest.java From flink with Apache License 2.0 | 5 votes |
private static Buffer appendLeftOverBytes(Buffer buffer, byte[] leftOverBytes) { BufferBuilder bufferBuilder = new BufferBuilder( MemorySegmentFactory.allocateUnpooledSegment(buffer.readableBytes() + leftOverBytes.length), FreeingBufferRecycler.INSTANCE); try (BufferConsumer bufferConsumer = bufferBuilder.createBufferConsumer()) { bufferBuilder.append(buffer.getNioBufferReadable()); bufferBuilder.appendAndCommit(ByteBuffer.wrap(leftOverBytes)); return bufferConsumer.build(); } }
Example 9
Source File: SpanningRecordSerializationTest.java From flink with Apache License 2.0 | 5 votes |
private static BufferAndSerializerResult setNextBufferForSerializer( RecordSerializer<SerializationTestType> serializer, int segmentSize) throws IOException { // create a bufferBuilder with some random starting offset to properly test handling buffer slices in the // deserialization code. int startingOffset = segmentSize > 2 ? RANDOM.nextInt(segmentSize / 2) : 0; BufferBuilder bufferBuilder = createFilledBufferBuilder(segmentSize + startingOffset, startingOffset); BufferConsumer bufferConsumer = bufferBuilder.createBufferConsumer(); bufferConsumer.build().recycleBuffer(); return new BufferAndSerializerResult( bufferBuilder, bufferConsumer, serializer.copyToBufferBuilder(bufferBuilder)); }
Example 10
Source File: LocalInputChannelTest.java From flink with Apache License 2.0 | 5 votes |
@Override public BufferConsumerAndChannel getNextBufferConsumer() throws Exception { if (channelIndexes.size() > 0) { final int channelIndex = channelIndexes.remove(0); BufferBuilder bufferBuilder = bufferProvider.requestBufferBuilderBlocking(); BufferConsumer bufferConsumer = bufferBuilder.createBufferConsumer(); bufferBuilder.appendAndCommit(ByteBuffer.wrap(new byte[4])); bufferBuilder.finish(); return new BufferConsumerAndChannel(bufferConsumer, channelIndex); } return null; }
Example 11
Source File: IteratorWrappingTestSingleInputGate.java From flink with Apache License 2.0 | 5 votes |
private IteratorWrappingTestSingleInputGate<T> wrapIterator(MutableObjectIterator<T> iterator) throws IOException, InterruptedException { inputIterator = iterator; serializer = new SpanningRecordSerializer<T>(); // The input iterator can produce an infinite stream. That's why we have to serialize each // record on demand and cannot do it upfront. final BufferAndAvailabilityProvider answer = new BufferAndAvailabilityProvider() { private boolean hasData = inputIterator.next(reuse) != null; @Override public Optional<BufferAndAvailability> getBufferAvailability() throws IOException { if (hasData) { serializer.serializeRecord(reuse); BufferBuilder bufferBuilder = createBufferBuilder(bufferSize); BufferConsumer bufferConsumer = bufferBuilder.createBufferConsumer(); serializer.copyToBufferBuilder(bufferBuilder); hasData = inputIterator.next(reuse) != null; // Call getCurrentBuffer to ensure size is set return Optional.of(new BufferAndAvailability(bufferConsumer.build(), true, 0)); } else { inputChannel.setReleased(); return Optional.of(new BufferAndAvailability(EventSerializer.toBuffer(EndOfPartitionEvent.INSTANCE), false, 0)); } } }; inputChannel.addBufferAndAvailability(answer); inputGate.setInputChannels(inputChannel); return this; }
Example 12
Source File: StreamTaskNetworkInputTest.java From flink with Apache License 2.0 | 5 votes |
private BufferOrEvent createDataBuffer() throws IOException { BufferBuilder bufferBuilder = BufferBuilderTestUtils.createEmptyBufferBuilder(PAGE_SIZE); BufferConsumer bufferConsumer = bufferBuilder.createBufferConsumer(); serializeRecord(42L, bufferBuilder); serializeRecord(44L, bufferBuilder); return new BufferOrEvent(bufferConsumer.build(), new InputChannelInfo(0, 0), false); }
Example 13
Source File: StreamTestSingleInputGate.java From flink with Apache License 2.0 | 4 votes |
@SuppressWarnings("unchecked") private void setupInputChannels() { for (int i = 0; i < numInputChannels; i++) { final int channelIndex = i; final RecordSerializer<SerializationDelegate<Object>> recordSerializer = new SpanningRecordSerializer<SerializationDelegate<Object>>(); final SerializationDelegate<Object> delegate = (SerializationDelegate<Object>) (SerializationDelegate<?>) new SerializationDelegate<>(new StreamElementSerializer<T>(serializer)); inputQueues[channelIndex] = new ConcurrentLinkedQueue<>(); inputChannels[channelIndex] = new TestInputChannel(inputGate, i); final BufferAndAvailabilityProvider answer = () -> { ConcurrentLinkedQueue<InputValue<Object>> inputQueue = inputQueues[channelIndex]; InputValue<Object> input; boolean moreAvailable; synchronized (inputQueue) { input = inputQueue.poll(); moreAvailable = !inputQueue.isEmpty(); } if (input != null && input.isStreamEnd()) { inputChannels[channelIndex].setReleased(); return Optional.of(new BufferAndAvailability(EventSerializer.toBuffer(EndOfPartitionEvent.INSTANCE), moreAvailable, 0)); } else if (input != null && input.isStreamRecord()) { Object inputElement = input.getStreamRecord(); delegate.setInstance(inputElement); recordSerializer.serializeRecord(delegate); BufferBuilder bufferBuilder = createBufferBuilder(bufferSize); BufferConsumer bufferConsumer = bufferBuilder.createBufferConsumer(); recordSerializer.copyToBufferBuilder(bufferBuilder); bufferBuilder.finish(); // Call getCurrentBuffer to ensure size is set return Optional.of(new BufferAndAvailability(bufferConsumer.build(), moreAvailable, 0)); } else if (input != null && input.isEvent()) { AbstractEvent event = input.getEvent(); if (event instanceof EndOfPartitionEvent) { inputChannels[channelIndex].setReleased(); } return Optional.of(new BufferAndAvailability(EventSerializer.toBuffer(event), moreAvailable, 0)); } else { return Optional.empty(); } }; inputChannels[channelIndex].addBufferAndAvailability(answer); } 
inputGate.setInputChannels(inputChannels); }