org.apache.flink.runtime.io.network.partition.ResultSubpartition.BufferAndBacklog Java Examples
The following examples show how to use
org.apache.flink.runtime.io.network.partition.ResultSubpartition.BufferAndBacklog.
The project, source file, and license for each example are noted above it.
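BufferAndBacklog is the value returned by ResultSubpartitionView.getNextBuffer(): it bundles the polled Buffer with the number of buffers still queued in the subpartition (the backlog) and flags describing whether more data, or an event, is immediately available. As a rough orientation before the individual examples, the sketch below shows the typical consumption loop. It is an illustrative sketch only, not code from any of the projects listed here: the SubpartitionDrainSketch class and the BufferHandler callback are assumptions, and the exact accessor names (isMoreAvailable() versus isDataAvailable()) differ between the Flink versions that appear below.

import java.io.IOException;

import org.apache.flink.runtime.io.network.buffer.Buffer;
import org.apache.flink.runtime.io.network.partition.ResultSubpartition.BufferAndBacklog;
import org.apache.flink.runtime.io.network.partition.ResultSubpartitionView;

// Illustrative sketch: drains whatever is currently readable from an already-created view.
public class SubpartitionDrainSketch {

    // Assumed downstream callback; not part of the Flink API.
    public interface BufferHandler {
        void onBuffer(Buffer buffer, int backlog, boolean moreAvailable);
    }

    public static void drainAvailable(ResultSubpartitionView view, BufferHandler handler)
            throws IOException, InterruptedException {
        BufferAndBacklog next;
        while ((next = view.getNextBuffer()) != null) {
            Buffer buffer = next.buffer();                   // the polled data or event buffer
            int backlog = next.buffersInBacklog();           // buffers still queued in the subpartition
            boolean moreAvailable = next.isMoreAvailable();  // whether another poll can succeed right away
            try {
                handler.onBuffer(buffer, backlog, moreAvailable);
            } finally {
                buffer.recycleBuffer();                      // the consumer is responsible for recycling
            }
        }
        // A null result means nothing is readable at the moment; a real consumer waits for the
        // BufferAvailabilityListener notification before polling again rather than busy-looping.
    }
}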
Example #1
Source File: CreditBasedSequenceNumberingViewReader.java From Flink-CEPplus with Apache License 2.0

@Override
public BufferAndAvailability getNextBuffer() throws IOException, InterruptedException {
    BufferAndBacklog next = subpartitionView.getNextBuffer();

    if (next != null) {
        sequenceNumber++;

        if (next.buffer().isBuffer() && --numCreditsAvailable < 0) {
            throw new IllegalStateException("no credit available");
        }

        return new BufferAndAvailability(
            next.buffer(), isAvailable(next), next.buffersInBacklog());
    } else {
        return null;
    }
}
Example #2
Source File: CreditBasedSequenceNumberingViewReader.java From flink with Apache License 2.0

@Override
public BufferAndAvailability getNextBuffer() throws IOException {
    BufferAndBacklog next = subpartitionView.getNextBuffer();

    if (next != null) {
        sequenceNumber++;

        if (next.buffer().isBuffer() && --numCreditsAvailable < 0) {
            throw new IllegalStateException("no credit available");
        }

        return new BufferAndAvailability(
            next.buffer(), isAvailable(next), next.buffersInBacklog());
    } else {
        return null;
    }
}
Example #3
Source File: SpilledSubpartitionView.java From Flink-CEPplus with Apache License 2.0

@Nullable
@Override
public BufferAndBacklog getNextBuffer() throws IOException, InterruptedException {
    if (isSpillInProgress) {
        return null;
    }

    Buffer current;
    boolean nextBufferIsEvent;
    synchronized (this) {
        if (nextBuffer == null) {
            current = requestAndFillBuffer();
        } else {
            current = nextBuffer;
        }
        nextBuffer = requestAndFillBuffer();
        nextBufferIsEvent = nextBuffer != null && !nextBuffer.isBuffer();
    }

    if (current == null) {
        return null;
    }

    int newBacklog = parent.decreaseBuffersInBacklog(current);
    return new BufferAndBacklog(current, newBacklog > 0 || nextBufferIsEvent, newBacklog, nextBufferIsEvent);
}
Example #4
Source File: BoundedBlockingSubpartitionReader.java From flink with Apache License 2.0

@Nullable
@Override
public BufferAndBacklog getNextBuffer() throws IOException {
    final Buffer current = nextBuffer; // copy reference to stack

    if (current == null) {
        // as per contract, we must return null when the reader is empty,
        // but also in case the reader is disposed (rather than throwing an exception)
        return null;
    }
    if (current.isBuffer()) {
        dataBufferBacklog--;
    }

    assert dataReader != null;
    nextBuffer = dataReader.nextBuffer();

    return BufferAndBacklog.fromBufferAndLookahead(current, nextBuffer, dataBufferBacklog);
}
Example #5
Source File: CreditBasedSequenceNumberingViewReader.java From flink with Apache License 2.0

@Override
public BufferAndAvailability getNextBuffer() throws IOException, InterruptedException {
    BufferAndBacklog next = subpartitionView.getNextBuffer();

    if (next != null) {
        sequenceNumber++;

        if (next.buffer().isBuffer() && --numCreditsAvailable < 0) {
            throw new IllegalStateException("no credit available");
        }

        return new BufferAndAvailability(
            next.buffer(), isAvailable(next), next.buffersInBacklog());
    } else {
        return null;
    }
}
Example #6
Source File: SequenceNumberingViewReader.java From Flink-CEPplus with Apache License 2.0

@Override
public BufferAndAvailability getNextBuffer() throws IOException, InterruptedException {
    BufferAndBacklog next = subpartitionView.getNextBuffer();
    if (next != null) {
        sequenceNumber++;
        return new BufferAndAvailability(next.buffer(), next.isMoreAvailable(), next.buffersInBacklog());
    } else {
        return null;
    }
}
Example #7
Source File: BoundedBlockingSubpartitionAvailabilityTest.java From flink with Apache License 2.0

private static List<BufferAndBacklog> drainAvailableData(ResultSubpartitionView reader) throws Exception {
    final ArrayList<BufferAndBacklog> list = new ArrayList<>();

    BufferAndBacklog bab;
    while ((bab = reader.getNextBuffer()) != null) {
        list.add(bab);
    }

    return list;
}
Example #8
Source File: FileChannelBoundedDataTest.java From flink with Apache License 2.0

@Test
public void testRecycleBufferForNotifyingBufferAvailabilityListener() throws Exception {
    final ResultSubpartition subpartition = createFileBoundedBlockingSubpartition();
    final int numberOfBuffers = 2;
    writeBuffers(subpartition, numberOfBuffers);

    final VerifyNotificationBufferAvailabilityListener listener = new VerifyNotificationBufferAvailabilityListener();
    final ResultSubpartitionView subpartitionView = subpartition.createReadView(listener);
    // the notification is triggered while creating view
    assertTrue(listener.isAvailable);

    listener.resetAvailable();
    assertFalse(listener.isAvailable);

    final BufferAndBacklog buffer1 = subpartitionView.getNextBuffer();
    final BufferAndBacklog buffer2 = subpartitionView.getNextBuffer();
    assertNotNull(buffer1);
    assertNotNull(buffer2);

    // the next buffer is null in view because FileBufferReader has no available buffers for reading ahead
    assertFalse(subpartitionView.isAvailable());

    // recycle a buffer to trigger notification of data available
    buffer1.buffer().recycleBuffer();
    assertTrue(listener.isAvailable);

    // cleanup
    buffer2.buffer().recycleBuffer();
    subpartitionView.releaseAllResources();
    subpartition.release();
}
Example #9
Source File: CancelPartitionRequestTest.java From flink with Apache License 2.0

@Nullable
@Override
public BufferAndBacklog getNextBuffer() throws IOException, InterruptedException {
    Buffer buffer = bufferProvider.requestBufferBlocking();
    buffer.setSize(buffer.getMaxCapacity()); // fake some data
    return new BufferAndBacklog(buffer, true, 0, false);
}
Example #10
Source File: PartitionRequestQueueTest.java From flink with Apache License 2.0

@Nullable
@Override
public BufferAndBacklog getNextBuffer() {
    BufferAndBacklog nextBuffer = super.getNextBuffer();
    return new BufferAndBacklog(
        nextBuffer.buffer().readOnlySlice(),
        nextBuffer.isMoreAvailable(),
        nextBuffer.buffersInBacklog(),
        nextBuffer.nextBufferIsEvent());
}
Example #11
Source File: PartitionRequestQueueTest.java From flink with Apache License 2.0

@Nullable
@Override
public BufferAndBacklog getNextBuffer() {
    int buffers = buffersInBacklog.decrementAndGet();
    return new BufferAndBacklog(
        TestBufferFactory.createBuffer(10),
        buffers > 0,
        buffers,
        false);
}
Example #12
Source File: LocalInputChannel.java From flink with Apache License 2.0

@Override
Optional<BufferAndAvailability> getNextBuffer() throws IOException, InterruptedException {
    checkError();

    ResultSubpartitionView subpartitionView = this.subpartitionView;
    if (subpartitionView == null) {
        // There is a possible race condition between writing a EndOfPartitionEvent (1) and flushing (3) the Local
        // channel on the sender side, and reading EndOfPartitionEvent (2) and processing flush notification (4). When
        // they happen in that order (1 - 2 - 3 - 4), flush notification can re-enqueue LocalInputChannel after (or
        // during) it was released during reading the EndOfPartitionEvent (2).
        if (isReleased) {
            return Optional.empty();
        }

        // this can happen if the request for the partition was triggered asynchronously
        // by the time trigger
        // would be good to avoid that, by guaranteeing that the requestPartition() and
        // getNextBuffer() always come from the same thread
        // we could do that by letting the timer insert a special "requesting channel" into the input gate's queue
        subpartitionView = checkAndWaitForSubpartitionView();
    }

    BufferAndBacklog next = subpartitionView.getNextBuffer();

    if (next == null) {
        if (subpartitionView.isReleased()) {
            throw new CancelTaskException("Consumed partition " + subpartitionView + " has been released.");
        } else {
            return Optional.empty();
        }
    }

    numBytesIn.inc(next.buffer().getSize());
    numBuffersIn.inc();
    return Optional.of(new BufferAndAvailability(next.buffer(), next.isMoreAvailable(), next.buffersInBacklog()));
}
Example #13
Source File: SequenceNumberingViewReader.java From flink with Apache License 2.0

@Override
public BufferAndAvailability getNextBuffer() throws IOException, InterruptedException {
    BufferAndBacklog next = subpartitionView.getNextBuffer();
    if (next != null) {
        sequenceNumber++;
        return new BufferAndAvailability(next.buffer(), next.isMoreAvailable(), next.buffersInBacklog());
    } else {
        return null;
    }
}
Example #14
Source File: PartitionRequestQueueTest.java From flink with Apache License 2.0

@Nullable
@Override
public BufferAndBacklog getNextBuffer() {
    BufferAndBacklog nextBuffer = super.getNextBuffer();
    return new BufferAndBacklog(
        nextBuffer.buffer().readOnlySlice(),
        nextBuffer.isDataAvailable(),
        nextBuffer.buffersInBacklog(),
        nextBuffer.isEventAvailable());
}
Example #15
Source File: CancelPartitionRequestTest.java From Flink-CEPplus with Apache License 2.0

@Nullable
@Override
public BufferAndBacklog getNextBuffer() throws IOException, InterruptedException {
    Buffer buffer = bufferProvider.requestBufferBlocking();
    buffer.setSize(buffer.getMaxCapacity()); // fake some data
    return new BufferAndBacklog(buffer, true, 0, false);
}
Example #16
Source File: CancelPartitionRequestTest.java From flink with Apache License 2.0

@Nullable
@Override
public BufferAndBacklog getNextBuffer() throws IOException {
    Buffer buffer = bufferProvider.requestBuffer();
    if (buffer != null) {
        buffer.setSize(buffer.getMaxCapacity()); // fake some data
        return new BufferAndBacklog(buffer, true, 0, false);
    } else {
        return null;
    }
}
Example #17
Source File: PartitionRequestQueueTest.java From Flink-CEPplus with Apache License 2.0

@Nullable
@Override
public BufferAndBacklog getNextBuffer() {
    BufferAndBacklog nextBuffer = super.getNextBuffer();
    return new BufferAndBacklog(
        nextBuffer.buffer().readOnlySlice(),
        nextBuffer.isMoreAvailable(),
        nextBuffer.buffersInBacklog(),
        nextBuffer.nextBufferIsEvent());
}
Example #18
Source File: PartitionRequestQueueTest.java From Flink-CEPplus with Apache License 2.0

@Nullable
@Override
public BufferAndBacklog getNextBuffer() {
    int buffers = buffersInBacklog.decrementAndGet();
    return new BufferAndBacklog(
        TestBufferFactory.createBuffer(10),
        buffers > 0,
        buffers,
        false);
}
Example #19
Source File: FileChannelBoundedDataTest.java From flink with Apache License 2.0

@Test
public void testRecycleBufferForNotifyingBufferAvailabilityListener() throws Exception {
    final ResultSubpartition subpartition = createFileBoundedBlockingSubpartition();
    final int numberOfBuffers = 2;
    writeBuffers(subpartition, numberOfBuffers);

    final VerifyNotificationBufferAvailabilityListener listener = new VerifyNotificationBufferAvailabilityListener();
    final ResultSubpartitionView subpartitionView = subpartition.createReadView(listener);
    // the notification is triggered while creating view
    assertTrue(listener.isAvailable);

    listener.resetAvailable();
    assertFalse(listener.isAvailable);

    final BufferAndBacklog buffer1 = subpartitionView.getNextBuffer();
    final BufferAndBacklog buffer2 = subpartitionView.getNextBuffer();
    assertNotNull(buffer1);
    assertNotNull(buffer2);

    // the next buffer is null in view because FileBufferReader has no available buffers for reading ahead
    assertFalse(subpartitionView.isAvailable(Integer.MAX_VALUE));

    // recycle a buffer to trigger notification of data available
    buffer1.buffer().recycleBuffer();
    assertTrue(listener.isAvailable);

    // cleanup
    buffer2.buffer().recycleBuffer();
    subpartitionView.releaseAllResources();
    subpartition.release();
}
Example #20
Source File: LocalInputChannel.java From Flink-CEPplus with Apache License 2.0

@Override
Optional<BufferAndAvailability> getNextBuffer() throws IOException, InterruptedException {
    checkError();

    ResultSubpartitionView subpartitionView = this.subpartitionView;
    if (subpartitionView == null) {
        // There is a possible race condition between writing a EndOfPartitionEvent (1) and flushing (3) the Local
        // channel on the sender side, and reading EndOfPartitionEvent (2) and processing flush notification (4). When
        // they happen in that order (1 - 2 - 3 - 4), flush notification can re-enqueue LocalInputChannel after (or
        // during) it was released during reading the EndOfPartitionEvent (2).
        if (isReleased) {
            return Optional.empty();
        }

        // this can happen if the request for the partition was triggered asynchronously
        // by the time trigger
        // would be good to avoid that, by guaranteeing that the requestPartition() and
        // getNextBuffer() always come from the same thread
        // we could do that by letting the timer insert a special "requesting channel" into the input gate's queue
        subpartitionView = checkAndWaitForSubpartitionView();
    }

    BufferAndBacklog next = subpartitionView.getNextBuffer();

    if (next == null) {
        if (subpartitionView.isReleased()) {
            throw new CancelTaskException("Consumed partition " + subpartitionView + " has been released.");
        } else {
            return Optional.empty();
        }
    }

    numBytesIn.inc(next.buffer().getSizeUnsafe());
    numBuffersIn.inc();
    return Optional.of(new BufferAndAvailability(next.buffer(), next.isMoreAvailable(), next.buffersInBacklog()));
}
Example #21
Source File: PipelinedSubpartitionView.java From flink with Apache License 2.0

@Nullable
@Override
public BufferAndBacklog getNextBuffer() {
    return parent.pollBuffer();
}
Example #22
Source File: LocalInputChannel.java From flink with Apache License 2.0

@Override
Optional<BufferAndAvailability> getNextBuffer() throws IOException, InterruptedException {
    checkError();

    ResultSubpartitionView subpartitionView = this.subpartitionView;
    if (subpartitionView == null) {
        // There is a possible race condition between writing a EndOfPartitionEvent (1) and flushing (3) the Local
        // channel on the sender side, and reading EndOfPartitionEvent (2) and processing flush notification (4). When
        // they happen in that order (1 - 2 - 3 - 4), flush notification can re-enqueue LocalInputChannel after (or
        // during) it was released during reading the EndOfPartitionEvent (2).
        if (isReleased) {
            return Optional.empty();
        }

        // this can happen if the request for the partition was triggered asynchronously
        // by the time trigger
        // would be good to avoid that, by guaranteeing that the requestPartition() and
        // getNextBuffer() always come from the same thread
        // we could do that by letting the timer insert a special "requesting channel" into the input gate's queue
        subpartitionView = checkAndWaitForSubpartitionView();
    }

    BufferAndBacklog next = subpartitionView.getNextBuffer();

    if (next == null) {
        if (subpartitionView.isReleased()) {
            throw new CancelTaskException("Consumed partition " + subpartitionView + " has been released.");
        } else {
            return Optional.empty();
        }
    }

    Buffer buffer = next.buffer();
    CheckpointBarrier notifyReceivedBarrier = parseCheckpointBarrierOrNull(buffer);
    if (notifyReceivedBarrier != null) {
        receivedCheckpointId = notifyReceivedBarrier.getId();
    } else if (receivedCheckpointId < lastRequestedCheckpointId && buffer.isBuffer()) {
        inputGate.getBufferReceivedListener().notifyBufferReceived(buffer.retainBuffer(), channelInfo);
    }

    numBytesIn.inc(buffer.getSize());
    numBuffersIn.inc();
    return Optional.of(new BufferAndAvailability(buffer, next.isDataAvailable(), next.buffersInBacklog()));
}
Example #23
Source File: BoundedBlockingSubpartitionAvailabilityTest.java From flink with Apache License 2.0

private static void drainAllData(ResultSubpartitionView reader) throws Exception {
    BufferAndBacklog bab;
    while ((bab = reader.getNextBuffer()) != null) {
        bab.buffer().recycleBuffer();
    }
}
Example #24
Source File: TestSubpartitionConsumer.java From flink with Apache License 2.0

@Override
public Boolean call() throws Exception {
    try {
        while (true) {
            if (Thread.interrupted()) {
                throw new InterruptedException();
            }

            synchronized (dataAvailableNotification) {
                while (!dataAvailableNotification.getAndSet(false)) {
                    dataAvailableNotification.wait();
                }
            }

            final BufferAndBacklog bufferAndBacklog = subpartitionView.getNextBuffer();

            if (isSlowConsumer) {
                Thread.sleep(random.nextInt(MAX_SLEEP_TIME_MS + 1));
            }

            if (bufferAndBacklog != null) {
                if (bufferAndBacklog.isDataAvailable()) {
                    dataAvailableNotification.set(true);
                }
                if (bufferAndBacklog.buffer().isBuffer()) {
                    callback.onBuffer(bufferAndBacklog.buffer());
                } else {
                    final AbstractEvent event = EventSerializer.fromBuffer(
                        bufferAndBacklog.buffer(), getClass().getClassLoader());

                    callback.onEvent(event);

                    bufferAndBacklog.buffer().recycleBuffer();

                    if (event.getClass() == EndOfPartitionEvent.class) {
                        subpartitionView.releaseAllResources();

                        return true;
                    }
                }
            } else if (subpartitionView.isReleased()) {
                return true;
            }
        }
    } finally {
        subpartitionView.releaseAllResources();
    }
}
Example #25
Source File: TestSubpartitionConsumer.java From flink with Apache License 2.0

@Override
public Boolean call() throws Exception {
    try {
        while (true) {
            if (Thread.interrupted()) {
                throw new InterruptedException();
            }

            synchronized (dataAvailableNotification) {
                while (!dataAvailableNotification.getAndSet(false)) {
                    dataAvailableNotification.wait();
                }
            }

            final BufferAndBacklog bufferAndBacklog = subpartitionView.getNextBuffer();

            if (isSlowConsumer) {
                Thread.sleep(random.nextInt(MAX_SLEEP_TIME_MS + 1));
            }

            if (bufferAndBacklog != null) {
                if (bufferAndBacklog.isMoreAvailable()) {
                    dataAvailableNotification.set(true);
                }
                if (bufferAndBacklog.buffer().isBuffer()) {
                    callback.onBuffer(bufferAndBacklog.buffer());
                } else {
                    final AbstractEvent event = EventSerializer.fromBuffer(
                        bufferAndBacklog.buffer(), getClass().getClassLoader());

                    callback.onEvent(event);

                    bufferAndBacklog.buffer().recycleBuffer();

                    if (event.getClass() == EndOfPartitionEvent.class) {
                        subpartitionView.notifySubpartitionConsumed();

                        return true;
                    }
                }
            } else if (subpartitionView.isReleased()) {
                return true;
            }
        }
    } finally {
        subpartitionView.releaseAllResources();
    }
}
Example #26
Source File: SingleInputGateTest.java From flink with Apache License 2.0

@Test
public void testBackwardsEventWithUninitializedChannel() throws Exception {
    // Setup environment
    final TaskEventDispatcher taskEventDispatcher = mock(TaskEventDispatcher.class);
    when(taskEventDispatcher.publish(any(ResultPartitionID.class), any(TaskEvent.class))).thenReturn(true);

    final ResultSubpartitionView iterator = mock(ResultSubpartitionView.class);
    when(iterator.getNextBuffer()).thenReturn(
        new BufferAndBacklog(
            new NetworkBuffer(MemorySegmentFactory.allocateUnpooledSegment(1024), FreeingBufferRecycler.INSTANCE),
            false,
            0,
            false));

    final ResultPartitionManager partitionManager = mock(ResultPartitionManager.class);
    when(partitionManager.createSubpartitionView(
        any(ResultPartitionID.class),
        anyInt(),
        any(BufferAvailabilityListener.class))).thenReturn(iterator);

    // Setup reader with one local and one unknown input channel
    NettyShuffleEnvironment environment = createNettyShuffleEnvironment();
    final SingleInputGate inputGate = createInputGate(environment, 2, ResultPartitionType.PIPELINED);
    try {
        // Local
        ResultPartitionID localPartitionId = new ResultPartitionID();

        InputChannelBuilder.newBuilder()
            .setPartitionId(localPartitionId)
            .setPartitionManager(partitionManager)
            .setTaskEventPublisher(taskEventDispatcher)
            .buildLocalAndSetToGate(inputGate);

        // Unknown
        ResultPartitionID unknownPartitionId = new ResultPartitionID();

        InputChannelBuilder.newBuilder()
            .setChannelIndex(1)
            .setPartitionId(unknownPartitionId)
            .setPartitionManager(partitionManager)
            .setTaskEventPublisher(taskEventDispatcher)
            .buildUnknownAndSetToGate(inputGate);

        inputGate.setup();

        // Only the local channel can request
        verify(partitionManager, times(1)).createSubpartitionView(
            any(ResultPartitionID.class), anyInt(), any(BufferAvailabilityListener.class));

        // Send event backwards and initialize unknown channel afterwards
        final TaskEvent event = new TestTaskEvent();
        inputGate.sendTaskEvent(event);

        // Only the local channel can send out the event
        verify(taskEventDispatcher, times(1)).publish(any(ResultPartitionID.class), any(TaskEvent.class));

        // After the update, the pending event should be send to local channel
        ResourceID location = ResourceID.generate();
        inputGate.updateInputChannel(location,
            createRemoteWithIdAndLocation(unknownPartitionId.getPartitionId(), location));

        verify(partitionManager, times(2)).createSubpartitionView(
            any(ResultPartitionID.class), anyInt(), any(BufferAvailabilityListener.class));
        verify(taskEventDispatcher, times(2)).publish(any(ResultPartitionID.class), any(TaskEvent.class));
    } finally {
        inputGate.close();
        environment.close();
    }
}