org.apache.flink.runtime.execution.CancelTaskException Java Examples
The following examples show how to use org.apache.flink.runtime.execution.CancelTaskException. The examples are taken from open source projects (Apache Flink and Flink-CEPplus, both under the Apache License 2.0); the source file is noted above each one.
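As orientation for the examples that follow, here is a minimal, self-contained sketch of the two recurring patterns: a running task throws CancelTaskException to signal that it is stopping because it was cancelled rather than because it failed, and the caller (typically a test) unwraps a failure with ExceptionUtils.findThrowable to decide whether it was merely a cancellation. This sketch is not taken from the Flink code base; the class names CancelTaskExceptionSketch and CancellableWorker are made up for illustration, while CancelTaskException and ExceptionUtils.findThrowable are the real Flink classes used throughout the examples below.

import org.apache.flink.runtime.execution.CancelTaskException;
import org.apache.flink.util.ExceptionUtils;

import java.util.concurrent.CompletableFuture;

public class CancelTaskExceptionSketch {

    /** Hypothetical worker that ends by throwing CancelTaskException once it is cancelled. */
    static class CancellableWorker implements Runnable {
        private volatile boolean canceled;

        void cancel() {
            canceled = true;
        }

        @Override
        public void run() {
            while (!canceled) {
                // ... do one unit of real work; here we just idle briefly ...
                try {
                    Thread.sleep(10);
                } catch (InterruptedException e) {
                    Thread.currentThread().interrupt();
                    break;
                }
            }
            // Signal that the task is stopping because of cancellation, not because of a failure.
            throw new CancelTaskException("Worker noticed cancellation and is shutting down.");
        }
    }

    public static void main(String[] args) throws Throwable {
        CancellableWorker worker = new CancellableWorker();
        CompletableFuture<Void> completion = CompletableFuture.runAsync(worker);

        worker.cancel();

        try {
            completion.join();
        } catch (Throwable t) {
            // Same pattern as in the tests below: rethrow only if the failure
            // was NOT caused by cancellation.
            if (!ExceptionUtils.findThrowable(t, CancelTaskException.class).isPresent()) {
                throw t;
            }
            System.out.println("Task ended due to cancellation, as expected.");
        }
    }
}

In the examples, the producer-side classes (SingleInputGate, BufferManager, LocalInputChannel, RemoteInputChannel) throw the exception with a descriptive message when a released or closed resource is accessed, while the tests assert that cancellation surfaces as a CancelTaskException rather than as an ordinary failure.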
Example #1
Source File: InputChannel.java From flink with Apache License 2.0

/**
 * Checks for an error and rethrows it if one was reported.
 *
 * <p>Note: {@link PartitionException} instances should not be transformed,
 * so that they are always visible in the task failure cause.
 */
protected void checkError() throws IOException {
    final Throwable t = cause.get();

    if (t != null) {
        if (t instanceof CancelTaskException) {
            throw (CancelTaskException) t;
        }
        if (t instanceof IOException) {
            throw (IOException) t;
        } else {
            throw new IOException(t);
        }
    }
}
Example #2
Source File: StreamSourceOperatorWatermarksTest.java From flink with Apache License 2.0

@Test
public void testNoMaxWatermarkOnAsyncCancel() throws Exception {
    StreamSource<String, ?> sourceOperator = new StreamSource<>(new InfiniteSource<>());
    StreamTaskTestHarness<String> testHarness =
        setupSourceStreamTask(sourceOperator, BasicTypeInfo.STRING_TYPE_INFO);

    testHarness.invoke();
    testHarness.waitForTaskRunning();
    Thread.sleep(200);
    testHarness.getTask().cancel();

    try {
        testHarness.waitForTaskCompletion();
    } catch (Throwable t) {
        if (!ExceptionUtils.findThrowable(t, CancelTaskException.class).isPresent()) {
            throw t;
        }
    }

    assertTrue(testHarness.getOutput().isEmpty());
}
Example #3
Source File: StreamSourceOperatorWatermarksTest.java From flink with Apache License 2.0

@Test
public void testNoMaxWatermarkOnImmediateCancel() throws Exception {
    StreamSource<String, ?> sourceOperator = new StreamSource<>(new InfiniteSource<>());
    StreamTaskTestHarness<String> testHarness =
        setupSourceStreamTask(sourceOperator, BasicTypeInfo.STRING_TYPE_INFO, true);

    testHarness.invoke();

    try {
        testHarness.waitForTaskCompletion();
        fail("should throw an exception");
    } catch (Throwable t) {
        if (!ExceptionUtils.findThrowable(t, CancelTaskException.class).isPresent()) {
            throw t;
        }
    }

    assertTrue(testHarness.getOutput().isEmpty());
}
Example #4
Source File: SourceStreamTask.java From flink with Apache License 2.0

@Override
protected void processInput(MailboxDefaultAction.Controller controller) throws Exception {

    controller.suspendDefaultAction();

    // Against the usual contract of this method, this implementation is not step-wise but blocking instead for
    // compatibility reasons with the current source interface (source functions run as a loop, not in steps).
    sourceThread.setTaskDescription(getName());
    sourceThread.start();
    sourceThread.getCompletionFuture().whenComplete((Void ignore, Throwable sourceThreadThrowable) -> {
        if (isCanceled() && ExceptionUtils.findThrowable(sourceThreadThrowable, InterruptedException.class).isPresent()) {
            mailboxProcessor.reportThrowable(new CancelTaskException(sourceThreadThrowable));
        } else if (!isFinished && sourceThreadThrowable != null) {
            mailboxProcessor.reportThrowable(sourceThreadThrowable);
        } else {
            mailboxProcessor.allActionsCompleted();
        }
    });
}
Example #5
Source File: LocalInputChannelTest.java From flink with Apache License 2.0

/**
 * Tests that reads from a channel after the partition has been released
 * are handled gracefully and don't lead to NPEs.
 */
@Test
public void testGetNextAfterPartitionReleased() throws Exception {
    ResultSubpartitionView subpartitionView = createResultSubpartitionView(false);
    TestingResultPartitionManager partitionManager = new TestingResultPartitionManager(subpartitionView);
    LocalInputChannel channel = createLocalInputChannel(new SingleInputGateBuilder().build(), partitionManager);

    channel.requestSubpartition(0);
    assertFalse(channel.getNextBuffer().isPresent());

    // release the subpartition view
    subpartitionView.releaseAllResources();

    try {
        channel.getNextBuffer();
        fail("Did not throw expected CancelTaskException");
    } catch (CancelTaskException ignored) {
    }

    channel.releaseAllResources();
    assertFalse(channel.getNextBuffer().isPresent());
}
Example #6
Source File: LocalInputChannelTest.java From flink with Apache License 2.0

@Test(expected = CancelTaskException.class)
public void testProducerFailedException() throws Exception {
    ResultSubpartitionView view = mock(ResultSubpartitionView.class);
    when(view.isReleased()).thenReturn(true);
    when(view.getFailureCause()).thenReturn(new Exception("Expected test exception"));

    ResultPartitionManager partitionManager = mock(ResultPartitionManager.class);
    when(partitionManager
        .createSubpartitionView(any(ResultPartitionID.class), anyInt(), any(BufferAvailabilityListener.class)))
        .thenReturn(view);

    SingleInputGate inputGate = mock(SingleInputGate.class);
    BufferProvider bufferProvider = mock(BufferProvider.class);
    when(inputGate.getBufferProvider()).thenReturn(bufferProvider);

    LocalInputChannel ch = createLocalInputChannel(inputGate, partitionManager);

    ch.requestSubpartition(0);

    // Should throw an instance of CancelTaskException.
    ch.getNextBuffer();
}
Example #7
Source File: RemoteInputChannelTest.java From flink with Apache License 2.0

@Test(expected = CancelTaskException.class)
public void testProducerFailedException() throws Exception {
    ConnectionManager connManager = mock(ConnectionManager.class);
    when(connManager.createPartitionRequestClient(any(ConnectionID.class)))
        .thenReturn(mock(PartitionRequestClient.class));

    final SingleInputGate gate = createSingleInputGate(1);
    final RemoteInputChannel ch = InputChannelTestUtils.createRemoteInputChannel(gate, 0, connManager);

    ch.onError(new ProducerFailedException(new RuntimeException("Expected test exception.")));

    ch.requestSubpartition(0);

    // Should throw an instance of CancelTaskException.
    ch.getNextBuffer();
}
Example #8
Source File: PartitionRequestQueueTest.java From flink with Apache License 2.0

@Test
public void testProducerFailedException() throws Exception {
    PartitionRequestQueue queue = new PartitionRequestQueue();

    ResultSubpartitionView view = new ReleasedResultSubpartitionView();

    ResultPartitionProvider partitionProvider = (partitionId, index, availabilityListener) -> view;

    EmbeddedChannel ch = new EmbeddedChannel(queue);

    CreditBasedSequenceNumberingViewReader seqView =
        new CreditBasedSequenceNumberingViewReader(new InputChannelID(), 2, queue);
    seqView.requestSubpartitionView(partitionProvider, new ResultPartitionID(), 0);
    // Add available buffer to trigger enqueue the erroneous view
    seqView.notifyDataAvailable();

    ch.runPendingTasks();

    // Read the enqueued msg
    Object msg = ch.readOutbound();
    assertEquals(msg.getClass(), NettyMessage.ErrorResponse.class);

    NettyMessage.ErrorResponse err = (NettyMessage.ErrorResponse) msg;
    assertTrue(err.cause instanceof CancelTaskException);
}
Example #9
Source File: SingleInputGate.java From flink with Apache License 2.0

private Optional<BufferOrEvent> getNextBufferOrEvent(boolean blocking) throws IOException, InterruptedException {
    if (hasReceivedAllEndOfPartitionEvents) {
        return Optional.empty();
    }

    if (closeFuture.isDone()) {
        throw new CancelTaskException("Input gate is already closed.");
    }

    Optional<InputWithData<InputChannel, BufferAndAvailability>> next = waitAndGetNextData(blocking);
    if (!next.isPresent()) {
        return Optional.empty();
    }

    InputWithData<InputChannel, BufferAndAvailability> inputWithData = next.get();
    return Optional.of(transformToBufferOrEvent(
        inputWithData.data.buffer(),
        inputWithData.moreAvailable,
        inputWithData.input));
}
Example #10
Source File: BufferManager.java From flink with Apache License 2.0

Buffer requestBufferBlocking() throws IOException, InterruptedException {
    synchronized (bufferQueue) {
        Buffer buffer;
        while ((buffer = bufferQueue.takeBuffer()) == null) {
            if (inputChannel.isReleased()) {
                throw new CancelTaskException("Input channel [" + inputChannel.channelInfo + "] has already been released.");
            }

            if (!isWaitingForFloatingBuffers) {
                BufferPool bufferPool = inputChannel.inputGate.getBufferPool();
                buffer = bufferPool.requestBuffer();
                if (buffer == null && shouldContinueRequest(bufferPool)) {
                    continue;
                }
            }

            if (buffer != null) {
                return buffer;
            }
            bufferQueue.wait();
        }
        return buffer;
    }
}
Example #11
Source File: RemoteInputChannel.java From flink with Apache License 2.0

@Override
Optional<BufferAndAvailability> getNextBuffer() throws IOException {
    checkPartitionRequestQueueInitialized();

    final Buffer next;
    final boolean moreAvailable;

    synchronized (receivedBuffers) {
        next = receivedBuffers.poll();
        moreAvailable = !receivedBuffers.isEmpty();
    }

    if (next == null) {
        if (isReleased.get()) {
            throw new CancelTaskException("Queried for a buffer after channel has been released.");
        } else {
            throw new IllegalStateException("There should always have queued buffers for unreleased channel.");
        }
    }

    numBytesIn.inc(next.getSize());
    numBuffersIn.inc();
    return Optional.of(new BufferAndAvailability(next, moreAvailable, 0));
}
Example #12
Source File: InputChannel.java From Flink-CEPplus with Apache License 2.0

/**
 * Checks for an error and rethrows it if one was reported.
 */
protected void checkError() throws IOException {
    final Throwable t = cause.get();

    if (t != null) {
        if (t instanceof CancelTaskException) {
            throw (CancelTaskException) t;
        }
        if (t instanceof IOException) {
            throw (IOException) t;
        } else {
            throw new IOException(t);
        }
    }
}
Example #13
Source File: PartitionRequestQueueTest.java From Flink-CEPplus with Apache License 2.0

@Test
public void testProducerFailedException() throws Exception {
    PartitionRequestQueue queue = new PartitionRequestQueue();

    ResultSubpartitionView view = new ReleasedResultSubpartitionView();

    ResultPartitionProvider partitionProvider = (partitionId, index, availabilityListener) -> view;

    EmbeddedChannel ch = new EmbeddedChannel(queue);

    CreditBasedSequenceNumberingViewReader seqView =
        new CreditBasedSequenceNumberingViewReader(new InputChannelID(), 2, queue);
    seqView.requestSubpartitionView(partitionProvider, new ResultPartitionID(), 0);
    // Add available buffer to trigger enqueue the erroneous view
    seqView.notifyDataAvailable();

    ch.runPendingTasks();

    // Read the enqueued msg
    Object msg = ch.readOutbound();
    assertEquals(msg.getClass(), NettyMessage.ErrorResponse.class);

    NettyMessage.ErrorResponse err = (NettyMessage.ErrorResponse) msg;
    assertTrue(err.cause instanceof CancelTaskException);
}
Example #14
Source File: RemoteInputChannelTest.java From Flink-CEPplus with Apache License 2.0

@Test(expected = CancelTaskException.class)
public void testProducerFailedException() throws Exception {
    ConnectionManager connManager = mock(ConnectionManager.class);
    when(connManager.createPartitionRequestClient(any(ConnectionID.class)))
        .thenReturn(mock(PartitionRequestClient.class));

    final RemoteInputChannel ch = new RemoteInputChannel(
        mock(SingleInputGate.class),
        0,
        new ResultPartitionID(),
        mock(ConnectionID.class),
        connManager,
        UnregisteredMetricGroups.createUnregisteredTaskMetricGroup().getIOMetricGroup());

    ch.onError(new ProducerFailedException(new RuntimeException("Expected test exception.")));

    ch.requestSubpartition(0);

    // Should throw an instance of CancelTaskException.
    ch.getNextBuffer();
}
Example #15
Source File: LocalInputChannelTest.java From Flink-CEPplus with Apache License 2.0

@Test(expected = CancelTaskException.class)
public void testProducerFailedException() throws Exception {
    ResultSubpartitionView view = mock(ResultSubpartitionView.class);
    when(view.isReleased()).thenReturn(true);
    when(view.getFailureCause()).thenReturn(new Exception("Expected test exception"));

    ResultPartitionManager partitionManager = mock(ResultPartitionManager.class);
    when(partitionManager
        .createSubpartitionView(any(ResultPartitionID.class), anyInt(), any(BufferAvailabilityListener.class)))
        .thenReturn(view);

    SingleInputGate inputGate = mock(SingleInputGate.class);
    BufferProvider bufferProvider = mock(BufferProvider.class);
    when(inputGate.getBufferProvider()).thenReturn(bufferProvider);

    LocalInputChannel ch = createLocalInputChannel(inputGate, partitionManager, new Tuple2<>(0, 0));

    ch.requestSubpartition(0);

    // Should throw an instance of CancelTaskException.
    ch.getNextBuffer();
}
Example #16
Source File: SourceStreamTaskTest.java From flink with Apache License 2.0

@Test
public void testNotMarkingEndOfInputWhenTaskCancelled() throws Exception {
    final StreamTaskTestHarness<String> testHarness = new StreamTaskTestHarness<>(
        SourceStreamTask::new,
        BasicTypeInfo.STRING_TYPE_INFO);

    testHarness
        .setupOperatorChain(
            new OperatorID(),
            new StreamSource<>(new CancelTestSource(
                BasicTypeInfo.STRING_TYPE_INFO.createSerializer(new ExecutionConfig()),
                "Hello")))
        .chain(
            new OperatorID(),
            new TestBoundedOneInputStreamOperator("Operator1"),
            BasicTypeInfo.STRING_TYPE_INFO.createSerializer(new ExecutionConfig()))
        .finish();

    StreamConfig streamConfig = testHarness.getStreamConfig();
    streamConfig.setTimeCharacteristic(TimeCharacteristic.ProcessingTime);

    ConcurrentLinkedQueue<Object> expectedOutput = new ConcurrentLinkedQueue<>();

    testHarness.invoke();
    CancelTestSource.getDataProcessing().await();
    testHarness.getTask().cancel();

    try {
        testHarness.waitForTaskCompletion();
    } catch (Throwable t) {
        if (!ExceptionUtils.findThrowable(t, CancelTaskException.class).isPresent()) {
            throw t;
        }
    }

    expectedOutput.add(new StreamRecord<>("Hello"));

    TestHarnessUtil.assertOutputEquals("Output was not correct.", expectedOutput, testHarness.getOutput());
}
Example #17
Source File: StreamTask.java From flink with Apache License 2.0

@Override
public final void invoke() throws Exception {
    try {
        beforeInvoke();

        // final check to exit early before starting to run
        if (canceled) {
            throw new CancelTaskException();
        }

        // let the task do its work
        runMailboxLoop();

        // if this left the run() method cleanly despite the fact that this was canceled,
        // make sure the "clean shutdown" is not attempted
        if (canceled) {
            throw new CancelTaskException();
        }

        afterInvoke();
    }
    catch (Exception invokeException) {
        try {
            cleanUpInvoke();
        }
        // TODO: investigate why Throwable instead of Exception is used here.
        catch (Throwable cleanUpException) {
            Throwable throwable = ExceptionUtils.firstOrSuppressed(cleanUpException, invokeException);
            throw (throwable instanceof Exception ? (Exception) throwable : new Exception(throwable));
        }

        throw invokeException;
    }
    cleanUpInvoke();
}
Example #18
Source File: StreamTask.java From flink with Apache License 2.0

protected void beforeInvoke() throws Exception {
    disposedOperators = false;
    LOG.debug("Initializing {}.", getName());

    operatorChain = new OperatorChain<>(this, recordWriter);
    headOperator = operatorChain.getHeadOperator();

    // task specific initialization
    init();

    // save the work of reloading state, etc, if the task is already canceled
    if (canceled) {
        throw new CancelTaskException();
    }

    // -------- Invoke --------
    LOG.debug("Invoking {}", getName());

    // we need to make sure that any triggers scheduled in open() cannot be
    // executed before all operators are opened
    actionExecutor.runThrowing(() -> {
        // both the following operations are protected by the lock
        // so that we avoid race conditions in the case that initializeState()
        // registers a timer, that fires before the open() is called.

        operatorChain.initializeStateAndOpenOperators(createStreamTaskStateInitializer());

        readRecoveredChannelState();
    });

    isRunning = true;
}
Example #19
Source File: SynchronousCheckpointTest.java From flink with Apache License 2.0

private void waitUntilMainExecutionThreadIsFinished() {
    try {
        taskInvocation.get();
    } catch (Exception e) {
        assertThat(e.getCause(), is(instanceOf(CancelTaskException.class)));
    }
}
Example #20
Source File: StreamTaskTest.java From flink with Apache License 2.0

void waitForTaskCompletion(boolean cancelled) throws Exception {
    try {
        invocationFuture.get();
    } catch (Exception e) {
        if (cancelled) {
            assertThat(e.getCause(), is(instanceOf(CancelTaskException.class)));
        } else {
            throw e;
        }
    }
    assertThat(streamTask.isCanceled(), is(cancelled));
}
Example #21
Source File: TaskTest.java From flink with Apache License 2.0

@Override
public void invoke() {
    awaitLatch.trigger();

    try {
        triggerLatch.await();
    } catch (Throwable ignored) {
    }

    throw new CancelTaskException();
}
Example #22
Source File: LocalInputChannel.java From flink with Apache License 2.0

@Override
Optional<BufferAndAvailability> getNextBuffer() throws IOException, InterruptedException {
    checkError();

    ResultSubpartitionView subpartitionView = this.subpartitionView;
    if (subpartitionView == null) {
        // There is a possible race condition between writing a EndOfPartitionEvent (1) and flushing (3) the Local
        // channel on the sender side, and reading EndOfPartitionEvent (2) and processing flush notification (4). When
        // they happen in that order (1 - 2 - 3 - 4), flush notification can re-enqueue LocalInputChannel after (or
        // during) it was released during reading the EndOfPartitionEvent (2).
        if (isReleased) {
            return Optional.empty();
        }

        // this can happen if the request for the partition was triggered asynchronously
        // by the time trigger
        // would be good to avoid that, by guaranteeing that the requestPartition() and
        // getNextBuffer() always come from the same thread
        // we could do that by letting the timer insert a special "requesting channel" into the input gate's queue
        subpartitionView = checkAndWaitForSubpartitionView();
    }

    BufferAndBacklog next = subpartitionView.getNextBuffer();

    if (next == null) {
        if (subpartitionView.isReleased()) {
            throw new CancelTaskException("Consumed partition " + subpartitionView + " has been released.");
        } else {
            return Optional.empty();
        }
    }

    numBytesIn.inc(next.buffer().getSize());
    numBuffersIn.inc();
    return Optional.of(new BufferAndAvailability(next.buffer(), next.isMoreAvailable(), next.buffersInBacklog()));
}
Example #23
Source File: BufferManager.java From flink with Apache License 2.0

private boolean shouldContinueRequest(BufferPool bufferPool) {
    if (bufferPool.addBufferListener(this)) {
        isWaitingForFloatingBuffers = true;
        numRequiredBuffers = 1;
        return false;
    } else if (bufferPool.isDestroyed()) {
        throw new CancelTaskException("Local buffer pool has already been released.");
    } else {
        return true;
    }
}
Example #24
Source File: LocalInputChannel.java From Flink-CEPplus with Apache License 2.0

@Override
Optional<BufferAndAvailability> getNextBuffer() throws IOException, InterruptedException {
    checkError();

    ResultSubpartitionView subpartitionView = this.subpartitionView;
    if (subpartitionView == null) {
        // There is a possible race condition between writing a EndOfPartitionEvent (1) and flushing (3) the Local
        // channel on the sender side, and reading EndOfPartitionEvent (2) and processing flush notification (4). When
        // they happen in that order (1 - 2 - 3 - 4), flush notification can re-enqueue LocalInputChannel after (or
        // during) it was released during reading the EndOfPartitionEvent (2).
        if (isReleased) {
            return Optional.empty();
        }

        // this can happen if the request for the partition was triggered asynchronously
        // by the time trigger
        // would be good to avoid that, by guaranteeing that the requestPartition() and
        // getNextBuffer() always come from the same thread
        // we could do that by letting the timer insert a special "requesting channel" into the input gate's queue
        subpartitionView = checkAndWaitForSubpartitionView();
    }

    BufferAndBacklog next = subpartitionView.getNextBuffer();

    if (next == null) {
        if (subpartitionView.isReleased()) {
            throw new CancelTaskException("Consumed partition " + subpartitionView + " has been released.");
        } else {
            return Optional.empty();
        }
    }

    numBytesIn.inc(next.buffer().getSizeUnsafe());
    numBuffersIn.inc();
    return Optional.of(new BufferAndAvailability(next.buffer(), next.isMoreAvailable(), next.buffersInBacklog()));
}
Example #25
Source File: TaskTest.java From Flink-CEPplus with Apache License 2.0

@Override
public void invoke() {
    awaitLatch.trigger();

    try {
        triggerLatch.await();
    } catch (Throwable ignored) {
    }

    throw new CancelTaskException();
}