org.apache.flink.runtime.io.network.buffer.BufferBuilder Java Examples
The following examples show how to use org.apache.flink.runtime.io.network.buffer.BufferBuilder.
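Before the individual examples, here is a minimal, self-contained sketch of the BufferBuilder life cycle they all share: wrap a MemorySegment, append and commit bytes, expose them through a BufferConsumer, and finish the builder. The class name, segment size, and payload below are illustrative assumptions rather than code from any particular Flink test; only API calls that also appear in the examples further down are used.

import java.nio.ByteBuffer;

import org.apache.flink.core.memory.MemorySegmentFactory;
import org.apache.flink.runtime.io.network.buffer.Buffer;
import org.apache.flink.runtime.io.network.buffer.BufferBuilder;
import org.apache.flink.runtime.io.network.buffer.BufferConsumer;
import org.apache.flink.runtime.io.network.buffer.FreeingBufferRecycler;

public class BufferBuilderLifecycleSketch {

    public static void main(String[] args) {
        // Back the builder with an unpooled segment; production code usually requests a
        // pooled builder from a BufferPool instead (illustrative setup, not Flink code).
        BufferBuilder bufferBuilder = new BufferBuilder(
            MemorySegmentFactory.allocateUnpooledSegment(4096),
            FreeingBufferRecycler.INSTANCE);

        // The consumer is the reading side of the same underlying memory segment.
        BufferConsumer bufferConsumer = bufferBuilder.createBufferConsumer();

        // Append some bytes and make them visible to the consumer in one step.
        bufferBuilder.appendAndCommit(ByteBuffer.wrap(new byte[]{1, 2, 3, 4}));

        // Mark the builder as done; no further appends are expected.
        bufferBuilder.finish();

        // Materialize the committed bytes as a Buffer and release it when done.
        Buffer buffer = bufferConsumer.build();
        System.out.println("readable bytes: " + buffer.readableBytes());
        buffer.recycleBuffer();
    }
}

The examples that follow show the same calls in context: tests that build and read buffers directly, and the RecordWriter/SpanningRecordSerializer code paths that fill BufferBuilders with serialized records.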
Example #1
Source File: ChannelStateSerializerImplTest.java From flink with Apache License 2.0
@Test
public void testReadToBufferBuilder() throws IOException {
    byte[] data = generateData(100);
    BufferBuilder bufferBuilder = new BufferBuilder(
        HeapMemorySegment.FACTORY.allocateUnpooledSegment(data.length, null),
        FreeingBufferRecycler.INSTANCE);
    BufferConsumer bufferConsumer = bufferBuilder.createBufferConsumer();

    new ChannelStateSerializerImpl().readData(new ByteArrayInputStream(data), wrap(bufferBuilder), Integer.MAX_VALUE);

    assertFalse(bufferBuilder.isFinished());

    bufferBuilder.finish();
    Buffer buffer = bufferConsumer.build();

    assertEquals(data.length, buffer.readableBytes());
    byte[] actual = new byte[buffer.readableBytes()];
    buffer.asByteBuf().readBytes(actual);
    assertArrayEquals(data, actual);
}
Example #2
Source File: SpanningRecordSerializerTest.java From Flink-CEPplus with Apache License 2.0
@Test
public void testHasSerializedData() throws IOException {
    final SpanningRecordSerializer<SerializationTestType> serializer = new SpanningRecordSerializer<>();
    final SerializationTestType randomIntRecord = Util.randomRecord(SerializationTestTypeFactory.INT);

    Assert.assertFalse(serializer.hasSerializedData());

    serializer.serializeRecord(randomIntRecord);
    Assert.assertTrue(serializer.hasSerializedData());

    final BufferBuilder bufferBuilder1 = createBufferBuilder(16);
    serializer.copyToBufferBuilder(bufferBuilder1);
    Assert.assertFalse(serializer.hasSerializedData());

    final BufferBuilder bufferBuilder2 = createBufferBuilder(8);
    serializer.reset();
    serializer.copyToBufferBuilder(bufferBuilder2);
    Assert.assertFalse(serializer.hasSerializedData());

    serializer.reset();
    serializer.copyToBufferBuilder(bufferBuilder2);

    // Buffer builder full!
    Assert.assertTrue(serializer.hasSerializedData());
}
Example #3
Source File: SpanningRecordSerializerTest.java From flink with Apache License 2.0
@Test
public void testHasSerializedData() throws IOException {
    final SpanningRecordSerializer<SerializationTestType> serializer = new SpanningRecordSerializer<>();
    final SerializationTestType randomIntRecord = Util.randomRecord(SerializationTestTypeFactory.INT);

    Assert.assertFalse(serializer.hasSerializedData());

    serializer.serializeRecord(randomIntRecord);
    Assert.assertTrue(serializer.hasSerializedData());

    final BufferBuilder bufferBuilder1 = createBufferBuilder(16);
    serializer.copyToBufferBuilder(bufferBuilder1);
    Assert.assertFalse(serializer.hasSerializedData());

    final BufferBuilder bufferBuilder2 = createBufferBuilder(8);
    serializer.reset();
    serializer.copyToBufferBuilder(bufferBuilder2);
    Assert.assertFalse(serializer.hasSerializedData());

    serializer.reset();
    serializer.copyToBufferBuilder(bufferBuilder2);

    // Buffer builder full!
    Assert.assertTrue(serializer.hasSerializedData());
}
Example #4
Source File: PipelinedSubpartitionWithReadViewTest.java From Flink-CEPplus with Apache License 2.0
@Test
public void testAddEmptyNonFinishedBuffer() {
    assertEquals(0, availablityListener.getNumNotifications());

    BufferBuilder bufferBuilder = createBufferBuilder();
    subpartition.add(bufferBuilder.createBufferConsumer());

    assertEquals(0, availablityListener.getNumNotifications());
    assertNull(readView.getNextBuffer());

    bufferBuilder.finish();
    bufferBuilder = createBufferBuilder();
    subpartition.add(bufferBuilder.createBufferConsumer());

    assertEquals(1, availablityListener.getNumNotifications()); // notification from finishing previous buffer.
    assertNull(readView.getNextBuffer());
    assertEquals(1, subpartition.getBuffersInBacklog());
}
Example #5
Source File: BackPressureStatsTrackerImplITCase.java From Flink-CEPplus with Apache License 2.0
@Override
public void invoke() throws Exception {
    final BufferBuilder bufferBuilder = testBufferPool.requestBufferBuilderBlocking();
    // Got a buffer, yay!
    BufferBuilderTestUtils.buildSingleBuffer(bufferBuilder).recycleBuffer();

    Thread.currentThread().join();
}
Example #6
Source File: PartialConsumePipelinedResultTest.java From flink with Apache License 2.0
@Override
public void invoke() throws Exception {
    final ResultPartitionWriter writer = getEnvironment().getWriter(0);

    for (int i = 0; i < 8; i++) {
        final BufferBuilder bufferBuilder = writer.getBufferBuilder(0);
        writer.addBufferConsumer(bufferBuilder.createBufferConsumer(), 0);
        Thread.sleep(50);
        bufferBuilder.finish();
    }
}
Example #7
Source File: RecordWriter.java From Flink-CEPplus with Apache License 2.0
/**
 * The {@link BufferBuilder} may already exist if not filled up last time, otherwise we need
 * to request a new one for this target channel.
 */
private BufferBuilder getBufferBuilder(int targetChannel) throws IOException, InterruptedException {
    if (bufferBuilders[targetChannel].isPresent()) {
        return bufferBuilders[targetChannel].get();
    } else {
        return requestNewBufferBuilder(targetChannel);
    }
}
Example #8
Source File: RecordWriter.java From Flink-CEPplus with Apache License 2.0
/**
 * Marks the current {@link BufferBuilder} as finished and clears the state for the next one.
 */
private void tryFinishCurrentBufferBuilder(int targetChannel) {
    if (!bufferBuilders[targetChannel].isPresent()) {
        return;
    }
    BufferBuilder bufferBuilder = bufferBuilders[targetChannel].get();
    bufferBuilders[targetChannel] = Optional.empty();
    numBytesOut.inc(bufferBuilder.finish());
    numBuffersOut.inc();
}
Example #9
Source File: SpanningRecordSerializer.java From flink with Apache License 2.0
/**
 * Copies an intermediate data serialization buffer into the target BufferBuilder.
 *
 * @param targetBuffer the target BufferBuilder to copy to
 * @return how much information was written to the target buffer and
 *         whether this buffer is full
 */
@Override
public SerializationResult copyToBufferBuilder(BufferBuilder targetBuffer) {
    targetBuffer.append(lengthBuffer);
    targetBuffer.append(dataBuffer);
    targetBuffer.commit();

    return getSerializationResult(targetBuffer);
}
Example #10
Source File: SpanningRecordSerializer.java From flink with Apache License 2.0
private SerializationResult getSerializationResult(BufferBuilder targetBuffer) {
    if (dataBuffer.hasRemaining() || lengthBuffer.hasRemaining()) {
        return SerializationResult.PARTIAL_RECORD_MEMORY_SEGMENT_FULL;
    }
    return !targetBuffer.isFull()
        ? SerializationResult.FULL_RECORD
        : SerializationResult.FULL_RECORD_MEMORY_SEGMENT_FULL;
}
Example #11
Source File: RecordWriter.java From flink with Apache License 2.0
/**
 * @param targetChannel
 * @return <tt>true</tt> if the intermediate serialization buffer should be pruned
 */
private boolean copyFromSerializerToTargetChannel(int targetChannel) throws IOException, InterruptedException {
    // We should reset the initial position of the intermediate serialization buffer before
    // copying, so the serialization results can be copied to multiple target buffers.
    serializer.reset();

    boolean pruneTriggered = false;
    BufferBuilder bufferBuilder = getBufferBuilder(targetChannel);
    SerializationResult result = serializer.copyToBufferBuilder(bufferBuilder);
    while (result.isFullBuffer()) {
        numBytesOut.inc(bufferBuilder.finish());
        numBuffersOut.inc();

        // If this was a full record, we are done. Not breaking out of the loop at this point
        // will lead to another buffer request before breaking out (that would not be a
        // problem per se, but it can lead to stalls in the pipeline).
        if (result.isFullRecord()) {
            pruneTriggered = true;
            bufferBuilders[targetChannel] = Optional.empty();
            break;
        }

        bufferBuilder = requestNewBufferBuilder(targetChannel);
        result = serializer.copyToBufferBuilder(bufferBuilder);
    }
    checkState(!serializer.hasSerializedData(), "All data should be written at once");

    if (flushAlways) {
        targetPartition.flush(targetChannel);
    }
    return pruneTriggered;
}
Example #12
Source File: RecordWriter.java From flink with Apache License 2.0
/**
 * The {@link BufferBuilder} may already exist if not filled up last time, otherwise we need
 * to request a new one for this target channel.
 */
private BufferBuilder getBufferBuilder(int targetChannel) throws IOException, InterruptedException {
    if (bufferBuilders[targetChannel].isPresent()) {
        return bufferBuilders[targetChannel].get();
    } else {
        return requestNewBufferBuilder(targetChannel);
    }
}
Example #13
Source File: SpanningRecordSerializationTest.java From flink with Apache License 2.0
private static BufferAndSerializerResult setNextBufferForSerializer(
        RecordSerializer<SerializationTestType> serializer,
        int segmentSize) throws IOException {
    // create a bufferBuilder with some random starting offset to properly test handling buffer slices in the
    // deserialization code.
    int startingOffset = segmentSize > 2 ? RANDOM.nextInt(segmentSize / 2) : 0;
    BufferBuilder bufferBuilder = createFilledBufferBuilder(segmentSize + startingOffset, startingOffset);
    BufferConsumer bufferConsumer = bufferBuilder.createBufferConsumer();
    bufferConsumer.build().recycleBuffer();

    return new BufferAndSerializerResult(
        bufferBuilder,
        bufferConsumer,
        serializer.copyToBufferBuilder(bufferBuilder));
}
Example #14
Source File: SpanningRecordSerializationTest.java From flink with Apache License 2.0
public BufferAndSerializerResult(
        BufferBuilder bufferBuilder,
        BufferConsumer bufferConsumer,
        RecordSerializer.SerializationResult serializationResult) {
    this.bufferBuilder = bufferBuilder;
    this.bufferConsumer = bufferConsumer;
    this.serializationResult = serializationResult;
}
Example #15
Source File: SpanningRecordSerializerTest.java From flink with Apache License 2.0
/**
 * Iterates over the provided records and tests whether the {@link SpanningRecordSerializer} returns the expected
 * {@link RecordSerializer.SerializationResult} values.
 *
 * <p>Only a single {@link MemorySegment} will be allocated.
 *
 * @param records records to test
 * @param segmentSize size for the {@link MemorySegment}
 */
private void test(Util.MockRecords records, int segmentSize) throws Exception {
    final int serializationOverhead = 4; // length encoding

    final SpanningRecordSerializer<SerializationTestType> serializer = new SpanningRecordSerializer<>();

    // -----------------------------------------------------------------------------------------------------------

    BufferBuilder bufferBuilder = createBufferBuilder(segmentSize);
    int numBytes = 0;
    for (SerializationTestType record : records) {
        serializer.serializeRecord(record);
        RecordSerializer.SerializationResult result = serializer.copyToBufferBuilder(bufferBuilder);
        numBytes += record.length() + serializationOverhead;

        if (numBytes < segmentSize) {
            Assert.assertEquals(RecordSerializer.SerializationResult.FULL_RECORD, result);
        } else if (numBytes == segmentSize) {
            Assert.assertEquals(RecordSerializer.SerializationResult.FULL_RECORD_MEMORY_SEGMENT_FULL, result);
            bufferBuilder = createBufferBuilder(segmentSize);
            numBytes = 0;
        } else {
            Assert.assertEquals(RecordSerializer.SerializationResult.PARTIAL_RECORD_MEMORY_SEGMENT_FULL, result);

            while (result.isFullBuffer()) {
                numBytes -= segmentSize;
                bufferBuilder = createBufferBuilder(segmentSize);
                result = serializer.copyToBufferBuilder(bufferBuilder);
            }

            Assert.assertTrue(result.isFullRecord());
        }
    }
}
Example #16
Source File: PipelinedSubpartitionWithReadViewTest.java From flink with Apache License 2.0
@Test
public void testAddNonEmptyNotFinishedBuffer() throws Exception {
    assertEquals(0, availablityListener.getNumNotifications());

    BufferBuilder bufferBuilder = createBufferBuilder();
    bufferBuilder.appendAndCommit(ByteBuffer.allocate(1024));
    subpartition.add(bufferBuilder.createBufferConsumer());

    // note that since the buffer builder is not finished, there is still a retained instance!
    assertEquals(0, subpartition.getBuffersInBacklog());
    assertNextBuffer(readView, 1024, false, 0, false, false);
}
Example #17
Source File: SpanningRecordSerializerTest.java From flink with Apache License 2.0
/**
 * Iterates over the provided records and tests whether the {@link SpanningRecordSerializer} returns the expected
 * {@link RecordSerializer.SerializationResult} values.
 *
 * <p>Only a single {@link MemorySegment} will be allocated.
 *
 * @param records records to test
 * @param segmentSize size for the {@link MemorySegment}
 */
private void test(Util.MockRecords records, int segmentSize) throws Exception {
    final int serializationOverhead = 4; // length encoding

    final SpanningRecordSerializer<SerializationTestType> serializer = new SpanningRecordSerializer<>();

    // -----------------------------------------------------------------------------------------------------------

    BufferBuilder bufferBuilder = createBufferBuilder(segmentSize);
    int numBytes = 0;
    for (SerializationTestType record : records) {
        serializer.serializeRecord(record);
        RecordSerializer.SerializationResult result = serializer.copyToBufferBuilder(bufferBuilder);
        numBytes += record.length() + serializationOverhead;

        if (numBytes < segmentSize) {
            Assert.assertEquals(RecordSerializer.SerializationResult.FULL_RECORD, result);
        } else if (numBytes == segmentSize) {
            Assert.assertEquals(RecordSerializer.SerializationResult.FULL_RECORD_MEMORY_SEGMENT_FULL, result);
            bufferBuilder = createBufferBuilder(segmentSize);
            numBytes = 0;
        } else {
            Assert.assertEquals(RecordSerializer.SerializationResult.PARTIAL_RECORD_MEMORY_SEGMENT_FULL, result);

            while (result.isFullBuffer()) {
                numBytes -= segmentSize;
                bufferBuilder = createBufferBuilder(segmentSize);
                result = serializer.copyToBufferBuilder(bufferBuilder);
            }

            Assert.assertTrue(result.isFullRecord());
        }
    }
}
Example #18
Source File: IteratorWrappingTestSingleInputGate.java From flink with Apache License 2.0
private IteratorWrappingTestSingleInputGate<T> wrapIterator(MutableObjectIterator<T> iterator) throws IOException, InterruptedException {
    inputIterator = iterator;
    serializer = new SpanningRecordSerializer<T>();

    // The input iterator can produce an infinite stream. That's why we have to serialize each
    // record on demand and cannot do it upfront.
    final BufferAndAvailabilityProvider answer = new BufferAndAvailabilityProvider() {

        private boolean hasData = inputIterator.next(reuse) != null;

        @Override
        public Optional<BufferAndAvailability> getBufferAvailability() throws IOException {
            if (hasData) {
                serializer.serializeRecord(reuse);
                BufferBuilder bufferBuilder = createBufferBuilder(bufferSize);
                serializer.copyToBufferBuilder(bufferBuilder);

                hasData = inputIterator.next(reuse) != null;

                // Call getCurrentBuffer to ensure size is set
                return Optional.of(new BufferAndAvailability(buildSingleBuffer(bufferBuilder), true, 0));
            } else {
                inputChannel.setReleased();
                return Optional.of(new BufferAndAvailability(EventSerializer.toBuffer(EndOfPartitionEvent.INSTANCE), false, 0));
            }
        }
    };

    inputChannel.addBufferAndAvailability(answer);

    inputGate.setInputChannel(new IntermediateResultPartitionID(), inputChannel);

    return this;
}
Example #19
Source File: ResultPartitionTest.java From flink with Apache License 2.0
/**
 * Tests {@link ResultPartition#releaseMemory(int)} on a working partition.
 *
 * @param resultPartitionType the result partition type to set up
 */
private void testReleaseMemory(final ResultPartitionType resultPartitionType) throws Exception {
    final int numAllBuffers = 10;
    final NettyShuffleEnvironment network = new NettyShuffleEnvironmentBuilder()
        .setNumNetworkBuffers(numAllBuffers).build();
    final ResultPartition resultPartition = createPartition(network, resultPartitionType, 1);
    try {
        resultPartition.setup();

        // take all buffers (more than the minimum required)
        for (int i = 0; i < numAllBuffers; ++i) {
            BufferBuilder bufferBuilder = resultPartition.getBufferPool().requestBufferBuilderBlocking();
            resultPartition.addBufferConsumer(bufferBuilder.createBufferConsumer(), 0);
        }
        resultPartition.finish();

        assertEquals(0, resultPartition.getBufferPool().getNumberOfAvailableMemorySegments());

        // reset the pool size less than the number of requested buffers
        final int numLocalBuffers = 4;
        resultPartition.getBufferPool().setNumBuffers(numLocalBuffers);

        // partition with blocking type should release excess buffers
        if (!resultPartitionType.hasBackPressure()) {
            assertEquals(numLocalBuffers, resultPartition.getBufferPool().getNumberOfAvailableMemorySegments());
        } else {
            assertEquals(0, resultPartition.getBufferPool().getNumberOfAvailableMemorySegments());
        }
    } finally {
        resultPartition.release();
        network.close();
    }
}
Example #20
Source File: StreamTaskNetworkInputTest.java From flink with Apache License 2.0
private void serializeRecord(long value, BufferBuilder bufferBuilder) throws IOException {
    RecordSerializer<SerializationDelegate<StreamElement>> serializer = new SpanningRecordSerializer<>();
    SerializationDelegate<StreamElement> serializationDelegate =
        new SerializationDelegate<>(
            new StreamElementSerializer<>(LongSerializer.INSTANCE));
    serializationDelegate.setInstance(new StreamRecord<>(value));

    serializer.serializeRecord(serializationDelegate);
    assertFalse(serializer.copyToBufferBuilder(bufferBuilder).isFullBuffer());
}
Example #21
Source File: ChannelStateReaderImpl.java From flink with Apache License 2.0
@Override
public ReadResult readOutputData(ResultSubpartitionInfo info, BufferBuilder bufferBuilder) throws IOException {
    Preconditions.checkState(!isClosed, "reader is closed");
    log.debug("readOutputData, resultSubpartitionInfo: {} , bufferBuilder {}", info, bufferBuilder);
    ChannelStateStreamReader reader = resultSubpartitionHandleReaders.get(info);
    return reader == null ? ReadResult.NO_MORE_DATA : reader.readInto(bufferBuilder);
}
Example #22
Source File: SpanningRecordSerializationTest.java From flink with Apache License 2.0
private static Buffer appendLeftOverBytes(Buffer buffer, byte[] leftOverBytes) {
    BufferBuilder bufferBuilder = new BufferBuilder(
        MemorySegmentFactory.allocateUnpooledSegment(buffer.readableBytes() + leftOverBytes.length),
        FreeingBufferRecycler.INSTANCE);

    try (BufferConsumer bufferConsumer = bufferBuilder.createBufferConsumer()) {
        bufferBuilder.append(buffer.getNioBufferReadable());
        bufferBuilder.appendAndCommit(ByteBuffer.wrap(leftOverBytes));
        return bufferConsumer.build();
    }
}
Example #23
Source File: SpanningRecordSerializer.java From flink with Apache License 2.0
/**
 * Copies an intermediate data serialization buffer into the target BufferBuilder.
 *
 * @param targetBuffer the target BufferBuilder to copy to
 * @return how much information was written to the target buffer and
 *         whether this buffer is full
 */
@Override
public SerializationResult copyToBufferBuilder(BufferBuilder targetBuffer) {
    targetBuffer.append(dataBuffer);
    targetBuffer.commit();

    return getSerializationResult(targetBuffer);
}
Example #24
Source File: SpanningRecordSerializer.java From flink with Apache License 2.0
private SerializationResult getSerializationResult(BufferBuilder targetBuffer) {
    if (dataBuffer.hasRemaining()) {
        return SerializationResult.PARTIAL_RECORD_MEMORY_SEGMENT_FULL;
    }
    return !targetBuffer.isFull()
        ? SerializationResult.FULL_RECORD
        : SerializationResult.FULL_RECORD_MEMORY_SEGMENT_FULL;
}
Example #25
Source File: ChannelSelectorRecordWriter.java From flink with Apache License 2.0
ChannelSelectorRecordWriter(
        ResultPartitionWriter writer,
        ChannelSelector<T> channelSelector,
        long timeout,
        String taskName) {
    super(writer, timeout, taskName);

    this.channelSelector = checkNotNull(channelSelector);
    this.channelSelector.setup(numberOfChannels);
    this.bufferBuilders = new BufferBuilder[numberOfChannels];
}
Example #26
Source File: ChannelSelectorRecordWriter.java From flink with Apache License 2.0
@Override
public BufferBuilder getBufferBuilder(int targetChannel) throws IOException, InterruptedException {
    if (bufferBuilders[targetChannel] != null) {
        return bufferBuilders[targetChannel];
    } else {
        return requestNewBufferBuilder(targetChannel);
    }
}
Example #27
Source File: ChannelSelectorRecordWriter.java From flink with Apache License 2.0
@Override
public BufferBuilder requestNewBufferBuilder(int targetChannel) throws IOException, InterruptedException {
    checkState(bufferBuilders[targetChannel] == null || bufferBuilders[targetChannel].isFinished());

    BufferBuilder bufferBuilder = super.requestNewBufferBuilder(targetChannel);
    addBufferConsumer(bufferBuilder.createBufferConsumer(), targetChannel);
    bufferBuilders[targetChannel] = bufferBuilder;
    return bufferBuilder;
}
Example #28
Source File: RecordWriter.java From flink with Apache License 2.0
/**
 * Requests a new {@link BufferBuilder} for the target channel and returns it.
 */
public BufferBuilder requestNewBufferBuilder(int targetChannel) throws IOException, InterruptedException {
    BufferBuilder builder = targetPartition.tryGetBufferBuilder(targetChannel);
    if (builder == null) {
        long start = System.currentTimeMillis();
        builder = targetPartition.getBufferBuilder(targetChannel);
        idleTimeMsPerSecond.markEvent(System.currentTimeMillis() - start);
    }
    return builder;
}
Example #29
Source File: ChannelPersistenceITCase.java From flink with Apache License 2.0
@Test
public void testReadWritten() throws Exception {
    long checkpointId = 1L;

    InputChannelInfo inputChannelInfo = new InputChannelInfo(2, 3);
    byte[] inputChannelInfoData = randomBytes(1024);

    ResultSubpartitionInfo resultSubpartitionInfo = new ResultSubpartitionInfo(4, 5);
    byte[] resultSubpartitionInfoData = randomBytes(1024);

    ChannelStateWriteResult handles = write(
        checkpointId,
        singletonMap(inputChannelInfo, inputChannelInfoData),
        singletonMap(resultSubpartitionInfo, resultSubpartitionInfoData));

    assertArrayEquals(inputChannelInfoData, read(
        toTaskStateSnapshot(handles),
        inputChannelInfoData.length,
        (reader, mem) -> reader.readInputData(inputChannelInfo, new NetworkBuffer(mem, FreeingBufferRecycler.INSTANCE))));

    assertArrayEquals(resultSubpartitionInfoData, read(
        toTaskStateSnapshot(handles),
        resultSubpartitionInfoData.length,
        (reader, mem) -> reader.readOutputData(resultSubpartitionInfo, new BufferBuilder(mem, FreeingBufferRecycler.INSTANCE))));
}
Example #30
Source File: BroadcastRecordWriter.java From flink with Apache License 2.0
@Override
public void tryFinishCurrentBufferBuilder(int targetChannel) {
    if (bufferBuilder == null) {
        return;
    }
    BufferBuilder builder = bufferBuilder;
    bufferBuilder = null;
    finishBufferBuilder(builder);
}