Java Code Examples for org.apache.flink.runtime.io.network.buffer.BufferBuilder#finish()
The following examples show how to use org.apache.flink.runtime.io.network.buffer.BufferBuilder#finish(). You can vote up the examples you like or vote down the ones you don't like, and go to the original project or source file by following the links above each example.
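In short, finish() marks a BufferBuilder as finished so that no more data can be appended to it; a BufferConsumer created from that builder can then build a Buffer containing everything committed before the call. The following minimal sketch of that pattern is assembled from the calls used in the examples below (the 1024-byte segment size and 4-byte payload are arbitrary assumptions):

// Hedged sketch: the allocation mirrors Example 4 below, the append call mirrors Example 6.
BufferBuilder bufferBuilder = new BufferBuilder(
    HeapMemorySegment.FACTORY.allocateUnpooledSegment(1024, null), // assumed segment size
    FreeingBufferRecycler.INSTANCE);
BufferConsumer bufferConsumer = bufferBuilder.createBufferConsumer();

// Write some data, then mark the builder as finished; nothing can be appended afterwards.
bufferBuilder.appendAndCommit(ByteBuffer.wrap(new byte[4]));
bufferBuilder.finish();

// The consumer now builds a Buffer containing everything committed before finish().
Buffer buffer = bufferConsumer.build();
assertEquals(4, buffer.readableBytes());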
Example 1
Source File: PipelinedSubpartitionWithReadViewTest.java From Flink-CEPplus with Apache License 2.0 | 6 votes |
@Test
public void testAddEmptyNonFinishedBuffer() {
    assertEquals(0, availablityListener.getNumNotifications());

    BufferBuilder bufferBuilder = createBufferBuilder();
    subpartition.add(bufferBuilder.createBufferConsumer());

    assertEquals(0, availablityListener.getNumNotifications());
    assertNull(readView.getNextBuffer());

    bufferBuilder.finish();
    bufferBuilder = createBufferBuilder();
    subpartition.add(bufferBuilder.createBufferConsumer());

    assertEquals(1, availablityListener.getNumNotifications()); // notification from finishing previous buffer.
    assertNull(readView.getNextBuffer());
    assertEquals(1, subpartition.getBuffersInBacklog());
}
Example 2
Source File: PipelinedSubpartitionWithReadViewTest.java From flink with Apache License 2.0 | 6 votes |
@Test
public void testAddEmptyNonFinishedBuffer() {
    assertEquals(0, availablityListener.getNumNotifications());

    BufferBuilder bufferBuilder = createBufferBuilder();
    subpartition.add(bufferBuilder.createBufferConsumer());

    assertEquals(0, availablityListener.getNumNotifications());
    assertNull(readView.getNextBuffer());

    bufferBuilder.finish();
    bufferBuilder = createBufferBuilder();
    subpartition.add(bufferBuilder.createBufferConsumer());

    assertEquals(1, subpartition.getBuffersInBacklog());
    assertEquals(1, availablityListener.getNumNotifications()); // notification from finishing previous buffer.
    assertNull(readView.getNextBuffer());
    assertEquals(0, subpartition.getBuffersInBacklog());
}
Example 3
Source File: PipelinedSubpartition.java From flink with Apache License 2.0 | 6 votes |
@Override
public void readRecoveredState(ChannelStateReader stateReader) throws IOException, InterruptedException {
    boolean recycleBuffer = true;
    for (ReadResult readResult = ReadResult.HAS_MORE_DATA; readResult == ReadResult.HAS_MORE_DATA;) {
        BufferBuilder bufferBuilder = parent.getBufferPool().requestBufferBuilderBlocking(subpartitionInfo.getSubPartitionIdx());
        BufferConsumer bufferConsumer = bufferBuilder.createBufferConsumer();
        try {
            readResult = stateReader.readOutputData(subpartitionInfo, bufferBuilder);

            // check whether there are some states data filled in this time
            if (bufferConsumer.isDataAvailable()) {
                add(bufferConsumer, false, false);
                recycleBuffer = false;
                bufferBuilder.finish();
            }
        } finally {
            if (recycleBuffer) {
                bufferConsumer.close();
            }
        }
    }
}
Example 4
Source File: ChannelStateSerializerImplTest.java From flink with Apache License 2.0 | 6 votes |
@Test
public void testReadToBufferBuilder() throws IOException {
    byte[] data = generateData(100);
    BufferBuilder bufferBuilder = new BufferBuilder(
        HeapMemorySegment.FACTORY.allocateUnpooledSegment(data.length, null),
        FreeingBufferRecycler.INSTANCE);
    BufferConsumer bufferConsumer = bufferBuilder.createBufferConsumer();

    new ChannelStateSerializerImpl().readData(new ByteArrayInputStream(data), wrap(bufferBuilder), Integer.MAX_VALUE);

    assertFalse(bufferBuilder.isFinished());

    bufferBuilder.finish();
    Buffer buffer = bufferConsumer.build();

    assertEquals(data.length, buffer.readableBytes());
    byte[] actual = new byte[buffer.readableBytes()];
    buffer.asByteBuf().readBytes(actual);
    assertArrayEquals(data, actual);
}
Example 5
Source File: PipelinedSubpartitionWithReadViewTest.java From flink with Apache License 2.0 | 6 votes |
@Test
public void testAddEmptyNonFinishedBuffer() throws IOException {
    assertEquals(0, availablityListener.getNumNotifications());

    BufferBuilder bufferBuilder = createBufferBuilder();
    subpartition.add(bufferBuilder.createBufferConsumer());

    assertEquals(0, availablityListener.getNumNotifications());
    assertNull(readView.getNextBuffer());

    bufferBuilder.finish();
    bufferBuilder = createBufferBuilder();
    subpartition.add(bufferBuilder.createBufferConsumer());

    assertEquals(1, subpartition.getBuffersInBacklog());
    assertEquals(1, availablityListener.getNumNotifications()); // notification from finishing previous buffer.
    assertNull(readView.getNextBuffer());
    assertEquals(0, subpartition.getBuffersInBacklog());
}
Example 6
Source File: LocalInputChannelTest.java From Flink-CEPplus with Apache License 2.0 | 5 votes |
@Override
public BufferConsumerAndChannel getNextBufferConsumer() throws Exception {
    if (channelIndexes.size() > 0) {
        final int channelIndex = channelIndexes.remove(0);
        BufferBuilder bufferBuilder = bufferProvider.requestBufferBuilderBlocking();
        bufferBuilder.appendAndCommit(ByteBuffer.wrap(new byte[4]));
        bufferBuilder.finish();
        return new BufferConsumerAndChannel(bufferBuilder.createBufferConsumer(), channelIndex);
    }
    return null;
}
Example 7
Source File: PartialConsumePipelinedResultTest.java From Flink-CEPplus with Apache License 2.0 | 5 votes |
@Override
public void invoke() throws Exception {
    final ResultPartitionWriter writer = getEnvironment().getWriter(0);

    for (int i = 0; i < 8; i++) {
        final BufferBuilder bufferBuilder = writer.getBufferProvider().requestBufferBuilderBlocking();
        writer.addBufferConsumer(bufferBuilder.createBufferConsumer(), 0);
        Thread.sleep(50);
        bufferBuilder.finish();
    }
}
Example 8
Source File: PipelinedSubpartitionWithReadViewTest.java From flink with Apache License 2.0 | 5 votes |
private void testBacklogConsistentWithNumberOfConsumableBuffers(boolean isFlushRequested, boolean isFinished) throws Exception {
    final int numberOfAddedBuffers = 5;
    for (int i = 1; i <= numberOfAddedBuffers; i++) {
        final BufferBuilder bufferBuilder = createFilledBufferBuilder(1024, 10);
        subpartition.add(bufferBuilder.createBufferConsumer());
        if (i < numberOfAddedBuffers || isFinished) {
            bufferBuilder.finish();
        }
    }

    if (isFlushRequested) {
        subpartition.flush();
    }

    if (isFinished) {
        subpartition.finish();
    }

    final int backlog = subpartition.getBuffersInBacklog();

    int numberOfConsumableBuffers = 0;
    try (final CloseableRegistry closeableRegistry = new CloseableRegistry()) {
        while (readView.isAvailable()) {
            ResultSubpartition.BufferAndBacklog bufferAndBacklog = readView.getNextBuffer();
            assertNotNull(bufferAndBacklog);

            if (bufferAndBacklog.buffer().isBuffer()) {
                ++numberOfConsumableBuffers;
            }

            closeableRegistry.registerCloseable(bufferAndBacklog.buffer()::recycleBuffer);
        }

        assertThat(backlog, is(numberOfConsumableBuffers));
    }
}
Example 9
Source File: LocalInputChannelTest.java From flink with Apache License 2.0 | 5 votes |
@Override
public BufferConsumerAndChannel getNextBufferConsumer() throws Exception {
    if (channelIndexes.size() > 0) {
        final int channelIndex = channelIndexes.remove(0);
        BufferBuilder bufferBuilder = bufferProvider.requestBufferBuilderBlocking();
        bufferBuilder.appendAndCommit(ByteBuffer.wrap(new byte[4]));
        bufferBuilder.finish();
        return new BufferConsumerAndChannel(bufferBuilder.createBufferConsumer(), channelIndex);
    }
    return null;
}
Example 10
Source File: PartialConsumePipelinedResultTest.java From flink with Apache License 2.0 | 5 votes |
@Override
public void invoke() throws Exception {
    final ResultPartitionWriter writer = getEnvironment().getWriter(0);

    for (int i = 0; i < 8; i++) {
        final BufferBuilder bufferBuilder = writer.getBufferBuilder();
        writer.addBufferConsumer(bufferBuilder.createBufferConsumer(), 0);
        Thread.sleep(50);
        bufferBuilder.finish();
    }
}
Example 11
Source File: LocalInputChannelTest.java From flink with Apache License 2.0 | 5 votes |
@Override
public BufferConsumerAndChannel getNextBufferConsumer() throws Exception {
    if (channelIndexes.size() > 0) {
        final int channelIndex = channelIndexes.remove(0);
        BufferBuilder bufferBuilder = bufferProvider.requestBufferBuilderBlocking();
        BufferConsumer bufferConsumer = bufferBuilder.createBufferConsumer();
        bufferBuilder.appendAndCommit(ByteBuffer.wrap(new byte[4]));
        bufferBuilder.finish();
        return new BufferConsumerAndChannel(bufferConsumer, channelIndex);
    }
    return null;
}
Example 12
Source File: PartialConsumePipelinedResultTest.java From flink with Apache License 2.0 | 5 votes |
@Override
public void invoke() throws Exception {
    final ResultPartitionWriter writer = getEnvironment().getWriter(0);

    for (int i = 0; i < 8; i++) {
        final BufferBuilder bufferBuilder = writer.getBufferBuilder(0);
        writer.addBufferConsumer(bufferBuilder.createBufferConsumer(), 0);
        Thread.sleep(50);
        bufferBuilder.finish();
    }
}
Example 13
Source File: SpillableSubpartitionTest.java From Flink-CEPplus with Apache License 2.0 | 4 votes |
/**
 * Tests a fix for FLINK-12544.
 *
 * @see <a href="https://issues.apache.org/jira/browse/FLINK-12544">FLINK-12544</a>
 */
@Test
public void testConcurrentRequestAndReleaseMemory() throws Exception {
    final ExecutorService executor = Executors.newFixedThreadPool(2);
    final NetworkBufferPool networkBufferPool = new NetworkBufferPool(10, 32);
    try {
        final CountDownLatch blockLatch = new CountDownLatch(1);
        final CountDownLatch doneLatch = new CountDownLatch(1);
        final IOManager ioManager = new IOManagerAsyncWithCountDownLatch(blockLatch, doneLatch);
        final ResultPartitionWithCountDownLatch partition = new ResultPartitionWithCountDownLatch(
            "Test",
            new NoOpTaskActions(),
            new JobID(),
            new ResultPartitionID(),
            ResultPartitionType.BLOCKING,
            1,
            1,
            new ResultPartitionManager(),
            new NoOpResultPartitionConsumableNotifier(),
            ioManager,
            true,
            doneLatch,
            blockLatch);
        final BufferPool bufferPool = networkBufferPool.createBufferPool(1, 1, Optional.of(partition));
        partition.registerBufferPool(bufferPool);

        final BufferBuilder firstBuffer = bufferPool.requestBufferBuilderBlocking();
        partition.addBufferConsumer(firstBuffer.createBufferConsumer(), 0);
        // Finishes the buffer consumer which could be recycled during SpillableSubpartition#releaseMemory
        firstBuffer.finish();

        Future<Void> future = executor.submit(new Callable<Void>() {
            @Override
            public Void call() throws Exception {
                // Occupies the lock in SpillableSubpartition#releaseMemory, trying to get the lock in LocalBufferPool#recycle
                partition.releaseMemory(1);
                return null;
            }
        });

        final CompletableFuture<?> firstCallFuture = partition.getFirstCallFuture();
        firstCallFuture.thenRunAsync(() -> {
            try {
                // There are no available buffers in pool, so trigger release memory in SpillableSubpartition.
                // Occupies the lock in LocalBufferPool, and trying to get the lock in SpillableSubpartition.
                BufferBuilder secondBuffer = bufferPool.requestBufferBuilderBlocking();
                assertThat(firstBuffer, is(equalTo(secondBuffer)));
            } catch (IOException | InterruptedException ex) {
                fail("Should not throw any exceptions!");
            }
        }, executor);

        future.get();
    } finally {
        networkBufferPool.destroyAllBufferPools();
        networkBufferPool.destroy();
        executor.shutdown();
    }
}
Example 14
Source File: StreamTestSingleInputGate.java From Flink-CEPplus with Apache License 2.0 | 4 votes |
@SuppressWarnings("unchecked") private void setupInputChannels() throws IOException, InterruptedException { for (int i = 0; i < numInputChannels; i++) { final int channelIndex = i; final RecordSerializer<SerializationDelegate<Object>> recordSerializer = new SpanningRecordSerializer<SerializationDelegate<Object>>(); final SerializationDelegate<Object> delegate = (SerializationDelegate<Object>) (SerializationDelegate<?>) new SerializationDelegate<StreamElement>(new StreamElementSerializer<T>(serializer)); inputQueues[channelIndex] = new ConcurrentLinkedQueue<InputValue<Object>>(); inputChannels[channelIndex] = new TestInputChannel(inputGate, i); final BufferAndAvailabilityProvider answer = () -> { ConcurrentLinkedQueue<InputValue<Object>> inputQueue = inputQueues[channelIndex]; InputValue<Object> input; boolean moreAvailable; synchronized (inputQueue) { input = inputQueue.poll(); moreAvailable = !inputQueue.isEmpty(); } if (input != null && input.isStreamEnd()) { inputChannels[channelIndex].setReleased(); return Optional.of(new BufferAndAvailability(EventSerializer.toBuffer(EndOfPartitionEvent.INSTANCE), moreAvailable, 0)); } else if (input != null && input.isStreamRecord()) { Object inputElement = input.getStreamRecord(); delegate.setInstance(inputElement); recordSerializer.serializeRecord(delegate); BufferBuilder bufferBuilder = createBufferBuilder(bufferSize); recordSerializer.copyToBufferBuilder(bufferBuilder); bufferBuilder.finish(); // Call getCurrentBuffer to ensure size is set return Optional.of(new BufferAndAvailability(buildSingleBuffer(bufferBuilder), moreAvailable, 0)); } else if (input != null && input.isEvent()) { AbstractEvent event = input.getEvent(); return Optional.of(new BufferAndAvailability(EventSerializer.toBuffer(event), moreAvailable, 0)); } else { return Optional.empty(); } }; inputChannels[channelIndex].addBufferAndAvailability(answer); inputGate.setInputChannel(new IntermediateResultPartitionID(), inputChannels[channelIndex]); } }
Example 15
Source File: StreamTestSingleInputGate.java From flink with Apache License 2.0 | 4 votes |
@SuppressWarnings("unchecked") private void setupInputChannels() throws IOException, InterruptedException { for (int i = 0; i < numInputChannels; i++) { final int channelIndex = i; final RecordSerializer<SerializationDelegate<Object>> recordSerializer = new SpanningRecordSerializer<SerializationDelegate<Object>>(); final SerializationDelegate<Object> delegate = (SerializationDelegate<Object>) (SerializationDelegate<?>) new SerializationDelegate<StreamElement>(new StreamElementSerializer<T>(serializer)); inputQueues[channelIndex] = new ConcurrentLinkedQueue<InputValue<Object>>(); inputChannels[channelIndex] = new TestInputChannel(inputGate, i); final BufferAndAvailabilityProvider answer = () -> { ConcurrentLinkedQueue<InputValue<Object>> inputQueue = inputQueues[channelIndex]; InputValue<Object> input; boolean moreAvailable; synchronized (inputQueue) { input = inputQueue.poll(); moreAvailable = !inputQueue.isEmpty(); } if (input != null && input.isStreamEnd()) { inputChannels[channelIndex].setReleased(); return Optional.of(new BufferAndAvailability(EventSerializer.toBuffer(EndOfPartitionEvent.INSTANCE), moreAvailable, 0)); } else if (input != null && input.isStreamRecord()) { Object inputElement = input.getStreamRecord(); delegate.setInstance(inputElement); recordSerializer.serializeRecord(delegate); BufferBuilder bufferBuilder = createBufferBuilder(bufferSize); recordSerializer.copyToBufferBuilder(bufferBuilder); bufferBuilder.finish(); // Call getCurrentBuffer to ensure size is set return Optional.of(new BufferAndAvailability(buildSingleBuffer(bufferBuilder), moreAvailable, 0)); } else if (input != null && input.isEvent()) { AbstractEvent event = input.getEvent(); if (event instanceof EndOfPartitionEvent) { inputChannels[channelIndex].setReleased(); } return Optional.of(new BufferAndAvailability(EventSerializer.toBuffer(event), moreAvailable, 0)); } else { return Optional.empty(); } }; inputChannels[channelIndex].addBufferAndAvailability(answer); inputGate.setInputChannel(new IntermediateResultPartitionID(), inputChannels[channelIndex]); } }
Example 16
Source File: StreamTestSingleInputGate.java From flink with Apache License 2.0 | 4 votes |
@SuppressWarnings("unchecked") private void setupInputChannels() { for (int i = 0; i < numInputChannels; i++) { final int channelIndex = i; final RecordSerializer<SerializationDelegate<Object>> recordSerializer = new SpanningRecordSerializer<SerializationDelegate<Object>>(); final SerializationDelegate<Object> delegate = (SerializationDelegate<Object>) (SerializationDelegate<?>) new SerializationDelegate<>(new StreamElementSerializer<T>(serializer)); inputQueues[channelIndex] = new ConcurrentLinkedQueue<>(); inputChannels[channelIndex] = new TestInputChannel(inputGate, i); final BufferAndAvailabilityProvider answer = () -> { ConcurrentLinkedQueue<InputValue<Object>> inputQueue = inputQueues[channelIndex]; InputValue<Object> input; boolean moreAvailable; synchronized (inputQueue) { input = inputQueue.poll(); moreAvailable = !inputQueue.isEmpty(); } if (input != null && input.isStreamEnd()) { inputChannels[channelIndex].setReleased(); return Optional.of(new BufferAndAvailability(EventSerializer.toBuffer(EndOfPartitionEvent.INSTANCE), moreAvailable, 0)); } else if (input != null && input.isStreamRecord()) { Object inputElement = input.getStreamRecord(); delegate.setInstance(inputElement); recordSerializer.serializeRecord(delegate); BufferBuilder bufferBuilder = createBufferBuilder(bufferSize); BufferConsumer bufferConsumer = bufferBuilder.createBufferConsumer(); recordSerializer.copyToBufferBuilder(bufferBuilder); bufferBuilder.finish(); // Call getCurrentBuffer to ensure size is set return Optional.of(new BufferAndAvailability(bufferConsumer.build(), moreAvailable, 0)); } else if (input != null && input.isEvent()) { AbstractEvent event = input.getEvent(); if (event instanceof EndOfPartitionEvent) { inputChannels[channelIndex].setReleased(); } return Optional.of(new BufferAndAvailability(EventSerializer.toBuffer(event), moreAvailable, 0)); } else { return Optional.empty(); } }; inputChannels[channelIndex].addBufferAndAvailability(answer); } inputGate.setInputChannels(inputChannels); }