Java Code Examples for org.apache.flink.api.common.ExecutionConfig#setRestartStrategy()
The following examples show how to use org.apache.flink.api.common.ExecutionConfig#setRestartStrategy().
The examples are drawn from open-source projects; the source file and originating project are noted above each example. You may check out the related API usage as well.
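The examples below come from Flink's test suites and construct JobGraphs directly. In a user program the same call is usually made on the ExecutionConfig obtained from the execution environment. The following snippet is a minimal sketch, not taken from any of the projects listed here; the class name, the trivial pipeline, and the concrete retry values (3 attempts, 10 seconds between attempts) are illustrative assumptions.

import org.apache.flink.api.common.ExecutionConfig;
import org.apache.flink.api.common.restartstrategy.RestartStrategies;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;

public class RestartStrategyExample {

    public static void main(String[] args) throws Exception {
        final StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();

        // The ExecutionConfig of the environment is shipped to the cluster with the job,
        // so the restart strategy set here applies to the whole job.
        final ExecutionConfig config = env.getConfig();

        // Illustrative values: retry the job at most 3 times, waiting 10000 ms between attempts.
        config.setRestartStrategy(RestartStrategies.fixedDelayRestart(3, 10000L));

        // A trivial pipeline so the sketch is runnable; a real job would define its own
        // sources, transformations, and sinks here.
        env.fromElements(1, 2, 3).print();

        env.execute("job-with-fixed-delay-restarts");
    }
}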
Example 1
Source File: JobRecoveryITCase.java From Flink-CEPplus with Apache License 2.0 | 6 votes |
private JobGraph createjobGraph(boolean slotSharingEnabled) throws IOException {
    final JobVertex sender = new JobVertex("Sender");
    sender.setParallelism(PARALLELISM);
    sender.setInvokableClass(TestingAbstractInvokables.Sender.class);

    final JobVertex receiver = new JobVertex("Receiver");
    receiver.setParallelism(PARALLELISM);
    receiver.setInvokableClass(FailingOnceReceiver.class);
    FailingOnceReceiver.reset();

    if (slotSharingEnabled) {
        final SlotSharingGroup slotSharingGroup = new SlotSharingGroup();
        receiver.setSlotSharingGroup(slotSharingGroup);
        sender.setSlotSharingGroup(slotSharingGroup);
    }

    receiver.connectNewDataSetAsInput(sender, DistributionPattern.POINTWISE, ResultPartitionType.PIPELINED);

    final ExecutionConfig executionConfig = new ExecutionConfig();
    // allow exactly one restart with no delay, so the single failure of FailingOnceReceiver can be recovered
    executionConfig.setRestartStrategy(RestartStrategies.fixedDelayRestart(1, 0L));

    final JobGraph jobGraph = new JobGraph(getClass().getSimpleName(), sender, receiver);
    jobGraph.setExecutionConfig(executionConfig);

    return jobGraph;
}
Example 2
Source File: SchedulingITCase.java From Flink-CEPplus with Apache License 2.0 | 6 votes |
@Nonnull
private JobGraph createJobGraph(long delay, int parallelism) throws IOException {
    SlotSharingGroup slotSharingGroup = new SlotSharingGroup();

    final JobVertex source = new JobVertex("source");
    source.setInvokableClass(OneTimeFailingInvokable.class);
    source.setParallelism(parallelism);
    source.setSlotSharingGroup(slotSharingGroup);

    final JobVertex sink = new JobVertex("sink");
    sink.setInvokableClass(NoOpInvokable.class);
    sink.setParallelism(parallelism);
    sink.setSlotSharingGroup(slotSharingGroup);

    sink.connectNewDataSetAsInput(source, DistributionPattern.POINTWISE, ResultPartitionType.PIPELINED);

    JobGraph jobGraph = new JobGraph(source, sink);
    jobGraph.setScheduleMode(ScheduleMode.EAGER);

    ExecutionConfig executionConfig = new ExecutionConfig();
    executionConfig.setRestartStrategy(RestartStrategies.fixedDelayRestart(1, delay));
    jobGraph.setExecutionConfig(executionConfig);

    return jobGraph;
}
Example 3
Source File: SchedulingITCase.java From flink with Apache License 2.0 | 6 votes |
@Nonnull
private JobGraph createJobGraph(long delay, int parallelism) throws IOException {
    SlotSharingGroup slotSharingGroup = new SlotSharingGroup();

    final JobVertex source = new JobVertex("source");
    source.setInvokableClass(OneTimeFailingInvokable.class);
    source.setParallelism(parallelism);
    source.setSlotSharingGroup(slotSharingGroup);

    final JobVertex sink = new JobVertex("sink");
    sink.setInvokableClass(NoOpInvokable.class);
    sink.setParallelism(parallelism);
    sink.setSlotSharingGroup(slotSharingGroup);

    sink.connectNewDataSetAsInput(source, DistributionPattern.POINTWISE, ResultPartitionType.PIPELINED);

    JobGraph jobGraph = new JobGraph(source, sink);
    jobGraph.setScheduleMode(ScheduleMode.EAGER);

    ExecutionConfig executionConfig = new ExecutionConfig();
    executionConfig.setRestartStrategy(RestartStrategies.fixedDelayRestart(1, delay));
    jobGraph.setExecutionConfig(executionConfig);

    return jobGraph;
}
Example 4
Source File: SchedulingITCase.java From flink with Apache License 2.0 | 6 votes |
@Nonnull
private JobGraph createJobGraph(long delay, int parallelism) throws IOException {
    SlotSharingGroup slotSharingGroup = new SlotSharingGroup();

    final JobVertex source = new JobVertex("source");
    source.setInvokableClass(OneTimeFailingInvokable.class);
    source.setParallelism(parallelism);
    source.setSlotSharingGroup(slotSharingGroup);

    final JobVertex sink = new JobVertex("sink");
    sink.setInvokableClass(NoOpInvokable.class);
    sink.setParallelism(parallelism);
    sink.setSlotSharingGroup(slotSharingGroup);

    sink.connectNewDataSetAsInput(source, DistributionPattern.POINTWISE, ResultPartitionType.PIPELINED);

    JobGraph jobGraph = new JobGraph(source, sink);
    jobGraph.setScheduleMode(ScheduleMode.EAGER);

    ExecutionConfig executionConfig = new ExecutionConfig();
    executionConfig.setRestartStrategy(RestartStrategies.fixedDelayRestart(1, delay));
    jobGraph.setExecutionConfig(executionConfig);

    return jobGraph;
}
Example 5
Source File: JobRecoveryITCase.java From flink with Apache License 2.0 | 6 votes |
private JobGraph createjobGraph(boolean slotSharingEnabled) throws IOException {
    final JobVertex sender = new JobVertex("Sender");
    sender.setParallelism(PARALLELISM);
    sender.setInvokableClass(TestingAbstractInvokables.Sender.class);

    final JobVertex receiver = new JobVertex("Receiver");
    receiver.setParallelism(PARALLELISM);
    receiver.setInvokableClass(FailingOnceReceiver.class);
    FailingOnceReceiver.reset();

    if (slotSharingEnabled) {
        final SlotSharingGroup slotSharingGroup = new SlotSharingGroup();
        receiver.setSlotSharingGroup(slotSharingGroup);
        sender.setSlotSharingGroup(slotSharingGroup);
    }

    receiver.connectNewDataSetAsInput(sender, DistributionPattern.POINTWISE, ResultPartitionType.PIPELINED);

    final ExecutionConfig executionConfig = new ExecutionConfig();
    // allow exactly one restart with no delay, so the single failure of FailingOnceReceiver can be recovered
    executionConfig.setRestartStrategy(RestartStrategies.fixedDelayRestart(1, 0L));

    final JobGraph jobGraph = new JobGraph(getClass().getSimpleName(), sender, receiver);
    jobGraph.setExecutionConfig(executionConfig);

    return jobGraph;
}
Example 6
Source File: MiniClusterITCase.java From Flink-CEPplus with Apache License 2.0 | 5 votes |
private static JobGraph getSimpleJob(int parallelism) throws IOException {
    final JobVertex task = new JobVertex("Test task");
    task.setParallelism(parallelism);
    task.setMaxParallelism(parallelism);
    task.setInvokableClass(NoOpInvokable.class);

    final JobGraph jg = new JobGraph(new JobID(), "Test Job", task);
    jg.setScheduleMode(ScheduleMode.EAGER);

    final ExecutionConfig executionConfig = new ExecutionConfig();
    // effectively unlimited restarts with a 1000 ms delay between attempts
    executionConfig.setRestartStrategy(RestartStrategies.fixedDelayRestart(Integer.MAX_VALUE, 1000));
    jg.setExecutionConfig(executionConfig);

    return jg;
}
Example 7
Source File: JobMasterTest.java From Flink-CEPplus with Apache License 2.0 | 5 votes |
private JobGraph createSingleVertexJobWithRestartStrategy() throws IOException {
    final JobGraph jobGraph = createSingleVertexJobGraph();
    final ExecutionConfig executionConfig = new ExecutionConfig();
    executionConfig.setRestartStrategy(RestartStrategies.fixedDelayRestart(Integer.MAX_VALUE, 0L));
    jobGraph.setExecutionConfig(executionConfig);

    return jobGraph;
}
Example 8
Source File: JobMasterTest.java From flink with Apache License 2.0 | 5 votes |
private JobGraph createSingleVertexJobWithRestartStrategy() throws IOException {
    final JobGraph jobGraph = JobGraphTestUtils.createSingleVertexJobGraph();
    final ExecutionConfig executionConfig = new ExecutionConfig();
    executionConfig.setRestartStrategy(RestartStrategies.fixedDelayRestart(Integer.MAX_VALUE, 0L));
    jobGraph.setExecutionConfig(executionConfig);

    return jobGraph;
}
Example 9
Source File: ExecutionGraphRestartTest.java From Flink-CEPplus with Apache License 2.0 | 5 votes |
/**
 * Tests that it is possible to fail a graph via a call to
 * {@link ExecutionGraph#failGlobal(Throwable)} after cancellation.
 */
@Test
public void testFailExecutionGraphAfterCancel() throws Exception {
    Instance instance = ExecutionGraphTestUtils.getInstance(
        new ActorTaskManagerGateway(
            new SimpleActorGateway(TestingUtils.directExecutionContext())),
        2);

    Scheduler scheduler = new Scheduler(TestingUtils.defaultExecutionContext());
    scheduler.newInstanceAvailable(instance);

    JobVertex vertex = ExecutionGraphTestUtils.createJobVertex("Test Vertex", 1, NoOpInvokable.class);

    ExecutionConfig executionConfig = new ExecutionConfig();
    executionConfig.setRestartStrategy(RestartStrategies.fixedDelayRestart(
        Integer.MAX_VALUE, Integer.MAX_VALUE));
    JobGraph jobGraph = new JobGraph("Test Job", vertex);
    jobGraph.setExecutionConfig(executionConfig);

    ExecutionGraph eg = newExecutionGraph(new InfiniteDelayRestartStrategy(), scheduler);

    eg.attachJobGraph(jobGraph.getVerticesSortedTopologicallyFromSources());

    assertEquals(JobStatus.CREATED, eg.getState());

    eg.scheduleForExecution();
    assertEquals(JobStatus.RUNNING, eg.getState());

    // Fail right after cancel (for example with concurrent slot release)
    eg.cancel();
    assertEquals(JobStatus.CANCELLING, eg.getState());

    eg.failGlobal(new Exception("Test Exception"));
    assertEquals(JobStatus.FAILING, eg.getState());

    Execution execution = eg.getAllExecutionVertices().iterator().next().getCurrentExecutionAttempt();
    execution.completeCancelling();
    assertEquals(JobStatus.RESTARTING, eg.getState());
}
Example 10
Source File: TaskExecutorITCase.java From flink with Apache License 2.0 | 5 votes |
private JobGraph createJobGraphWithRestartStrategy(int parallelism) throws IOException {
    final JobGraph jobGraph = createJobGraph(parallelism);
    final ExecutionConfig executionConfig = new ExecutionConfig();
    executionConfig.setRestartStrategy(RestartStrategies.fixedDelayRestart(2, 0L));
    jobGraph.setExecutionConfig(executionConfig);

    return jobGraph;
}
Example 11
Source File: MiniClusterITCase.java From flink with Apache License 2.0 | 5 votes |
private static JobGraph getSimpleJob(int parallelism) throws IOException {
    final JobVertex task = new JobVertex("Test task");
    task.setParallelism(parallelism);
    task.setMaxParallelism(parallelism);
    task.setInvokableClass(NoOpInvokable.class);

    final JobGraph jg = new JobGraph(new JobID(), "Test Job", task);
    jg.setScheduleMode(ScheduleMode.EAGER);

    final ExecutionConfig executionConfig = new ExecutionConfig();
    // effectively unlimited restarts with a 1000 ms delay between attempts
    executionConfig.setRestartStrategy(RestartStrategies.fixedDelayRestart(Integer.MAX_VALUE, 1000));
    jg.setExecutionConfig(executionConfig);

    return jg;
}
Example 12
Source File: JobMasterTest.java From flink with Apache License 2.0 | 5 votes |
private JobGraph createSingleVertexJobWithRestartStrategy() throws IOException {
    final JobGraph jobGraph = createSingleVertexJobGraph();
    final ExecutionConfig executionConfig = new ExecutionConfig();
    executionConfig.setRestartStrategy(RestartStrategies.fixedDelayRestart(Integer.MAX_VALUE, 0L));
    jobGraph.setExecutionConfig(executionConfig);

    return jobGraph;
}
Example 13
Source File: TaskExecutorITCase.java From flink with Apache License 2.0 | 5 votes |
private JobGraph createJobGraphWithRestartStrategy(int parallelism) throws IOException {
    final JobGraph jobGraph = createJobGraph(parallelism);
    final ExecutionConfig executionConfig = new ExecutionConfig();
    executionConfig.setRestartStrategy(RestartStrategies.fixedDelayRestart(1, 0L));
    jobGraph.setExecutionConfig(executionConfig);

    return jobGraph;
}
Example 14
Source File: ExecutionGraphRestartTest.java From flink with Apache License 2.0 | 5 votes |
private static JobGraph createJobGraphToCancel() throws IOException {
    JobVertex vertex = ExecutionGraphTestUtils.createJobVertex("Test Vertex", 1, NoOpInvokable.class);
    ExecutionConfig executionConfig = new ExecutionConfig();
    executionConfig.setRestartStrategy(RestartStrategies.fixedDelayRestart(
        Integer.MAX_VALUE, Integer.MAX_VALUE));
    JobGraph jobGraph = new JobGraph("Test Job", vertex);
    jobGraph.setExecutionConfig(executionConfig);
    return jobGraph;
}
Example 15
Source File: ExecutionGraphRestartTest.java From flink with Apache License 2.0 | 5 votes |
private static JobGraph createJobGraphToCancel() throws IOException {
    JobVertex vertex = ExecutionGraphTestUtils.createJobVertex("Test Vertex", 1, NoOpInvokable.class);
    ExecutionConfig executionConfig = new ExecutionConfig();
    executionConfig.setRestartStrategy(RestartStrategies.fixedDelayRestart(
        Integer.MAX_VALUE, Integer.MAX_VALUE));
    JobGraph jobGraph = new JobGraph("Test Job", vertex);
    jobGraph.setExecutionConfig(executionConfig);
    return jobGraph;
}
Example 16
Source File: JobMasterTest.java From flink with Apache License 2.0 | 4 votes |
private void runRequestNextInputSplitTest(Function<List<List<InputSplit>>, Collection<InputSplit>> expectedRemainingInputSplits) throws Exception {
    final int parallelism = 2;
    final int splitsPerTask = 2;

    final int totalSplits = parallelism * splitsPerTask;
    final List<TestingInputSplit> allInputSplits = new ArrayList<>(totalSplits);

    for (int i = 0; i < totalSplits; i++) {
        allInputSplits.add(new TestingInputSplit(i));
    }

    final InputSplitSource<TestingInputSplit> inputSplitSource = new TestingInputSplitSource(allInputSplits);

    JobVertex source = new JobVertex("source");
    source.setParallelism(parallelism);
    source.setInputSplitSource(inputSplitSource);
    source.setInvokableClass(AbstractInvokable.class);

    final JobGraph inputSplitJobGraph = new JobGraph(source);

    final ExecutionConfig executionConfig = new ExecutionConfig();
    executionConfig.setRestartStrategy(RestartStrategies.fixedDelayRestart(100, 0));
    inputSplitJobGraph.setExecutionConfig(executionConfig);

    final JobMaster jobMaster = createJobMaster(
        configuration,
        inputSplitJobGraph,
        haServices,
        new TestingJobManagerSharedServicesBuilder().build(),
        heartbeatServices);

    CompletableFuture<Acknowledge> startFuture = jobMaster.start(jobMasterId);

    try {
        // wait for the start to complete
        startFuture.get(testingTimeout.toMilliseconds(), TimeUnit.MILLISECONDS);

        final JobMasterGateway jobMasterGateway = jobMaster.getSelfGateway(JobMasterGateway.class);

        final JobVertexID sourceId = source.getID();

        final List<AccessExecution> executions = getExecutions(jobMasterGateway, sourceId);
        final ExecutionAttemptID initialAttemptId = executions.get(0).getAttemptId();
        final List<List<InputSplit>> inputSplitsPerTask = new ArrayList<>(parallelism);

        // request all input splits
        for (AccessExecution execution : executions) {
            inputSplitsPerTask.add(getInputSplits(splitsPerTask, getInputSplitSupplier(sourceId, jobMasterGateway, execution.getAttemptId())));
        }

        final List<InputSplit> allRequestedInputSplits = flattenCollection(inputSplitsPerTask);
        assertThat(allRequestedInputSplits, containsInAnyOrder(allInputSplits.toArray(EMPTY_TESTING_INPUT_SPLITS)));

        waitUntilAllExecutionsAreScheduled(jobMasterGateway);

        // fail the first execution to trigger a failover
        jobMasterGateway.updateTaskExecutionState(new TaskExecutionState(inputSplitJobGraph.getJobID(), initialAttemptId, ExecutionState.FAILED)).get();

        // wait until the job has been recovered
        waitUntilAllExecutionsAreScheduled(jobMasterGateway);

        final ExecutionAttemptID restartedAttemptId = getFirstExecution(jobMasterGateway, sourceId).getAttemptId();

        final List<InputSplit> inputSplits = getRemainingInputSplits(getInputSplitSupplier(sourceId, jobMasterGateway, restartedAttemptId));

        assertThat(inputSplits, containsInAnyOrder(expectedRemainingInputSplits.apply(inputSplitsPerTask).toArray(EMPTY_TESTING_INPUT_SPLITS)));
    } finally {
        RpcUtils.terminateRpcEndpoint(jobMaster, testingTimeout);
    }
}
Example 17
Source File: ArchivedExecutionGraphTest.java From flink with Apache License 2.0 | 4 votes |
@BeforeClass
public static void setupExecutionGraph() throws Exception {
    // ---------------------------------------------------------------------------------------------
    // Setup
    // ---------------------------------------------------------------------------------------------

    JobVertexID v1ID = new JobVertexID();
    JobVertexID v2ID = new JobVertexID();

    JobVertex v1 = new JobVertex("v1", v1ID);
    JobVertex v2 = new JobVertex("v2", v2ID);

    v1.setParallelism(1);
    v2.setParallelism(2);

    v1.setInvokableClass(AbstractInvokable.class);
    v2.setInvokableClass(AbstractInvokable.class);

    List<JobVertex> vertices = new ArrayList<>(Arrays.asList(v1, v2));

    ExecutionConfig config = new ExecutionConfig();

    config.setExecutionMode(ExecutionMode.BATCH_FORCED);
    config.setRestartStrategy(new RestartStrategies.NoRestartStrategyConfiguration());
    config.setParallelism(4);
    config.enableObjectReuse();
    config.setGlobalJobParameters(new TestJobParameters());

    runtimeGraph = new ExecutionGraph(
        TestingUtils.defaultExecutor(),
        TestingUtils.defaultExecutor(),
        new JobID(),
        "test job",
        new Configuration(),
        new SerializedValue<>(config),
        AkkaUtils.getDefaultTimeout(),
        new NoRestartStrategy(),
        mock(SlotProvider.class));

    runtimeGraph.start(ComponentMainThreadExecutorServiceAdapter.forMainThread());

    runtimeGraph.attachJobGraph(vertices);

    List<ExecutionJobVertex> jobVertices = new ArrayList<>();
    jobVertices.add(runtimeGraph.getJobVertex(v1ID));
    jobVertices.add(runtimeGraph.getJobVertex(v2ID));

    CheckpointStatsTracker statsTracker = new CheckpointStatsTracker(
        0,
        jobVertices,
        mock(CheckpointCoordinatorConfiguration.class),
        new UnregisteredMetricsGroup());

    CheckpointCoordinatorConfiguration chkConfig = new CheckpointCoordinatorConfiguration(
        100,
        100,
        100,
        1,
        CheckpointRetentionPolicy.NEVER_RETAIN_AFTER_TERMINATION,
        true,
        false,
        0);

    runtimeGraph.enableCheckpointing(
        chkConfig,
        Collections.<ExecutionJobVertex>emptyList(),
        Collections.<ExecutionJobVertex>emptyList(),
        Collections.<ExecutionJobVertex>emptyList(),
        Collections.<MasterTriggerRestoreHook<?>>emptyList(),
        new StandaloneCheckpointIDCounter(),
        new StandaloneCompletedCheckpointStore(1),
        new MemoryStateBackend(),
        statsTracker);

    runtimeGraph.setJsonPlan("{}");

    runtimeGraph.getJobVertex(v2ID).getTaskVertices()[0].getCurrentExecutionAttempt().fail(new RuntimeException("This exception was thrown on purpose."));
}
Example 18
Source File: JobMasterTest.java From flink with Apache License 2.0 | 4 votes |
private void runRequestNextInputSplitTest(Function<List<List<InputSplit>>, Collection<InputSplit>> expectedRemainingInputSplits) throws Exception {
    final int parallelism = 2;
    final int splitsPerTask = 2;

    final int totalSplits = parallelism * splitsPerTask;
    final List<TestingInputSplit> allInputSplits = new ArrayList<>(totalSplits);

    for (int i = 0; i < totalSplits; i++) {
        allInputSplits.add(new TestingInputSplit(i));
    }

    final InputSplitSource<TestingInputSplit> inputSplitSource = new TestingInputSplitSource(allInputSplits);

    JobVertex source = new JobVertex("source");
    source.setParallelism(parallelism);
    source.setInputSplitSource(inputSplitSource);
    source.setInvokableClass(AbstractInvokable.class);

    final JobGraph inputSplitjobGraph = new JobGraph(source);
    inputSplitjobGraph.setAllowQueuedScheduling(true);

    final ExecutionConfig executionConfig = new ExecutionConfig();
    executionConfig.setRestartStrategy(RestartStrategies.fixedDelayRestart(100, 0));
    inputSplitjobGraph.setExecutionConfig(executionConfig);

    final JobMaster jobMaster = createJobMaster(
        configuration,
        inputSplitjobGraph,
        haServices,
        new TestingJobManagerSharedServicesBuilder().build(),
        heartbeatServices);

    CompletableFuture<Acknowledge> startFuture = jobMaster.start(jobMasterId);

    try {
        // wait for the start to complete
        startFuture.get(testingTimeout.toMilliseconds(), TimeUnit.MILLISECONDS);

        final JobMasterGateway jobMasterGateway = jobMaster.getSelfGateway(JobMasterGateway.class);

        final JobVertexID sourceId = source.getID();

        final List<AccessExecution> executions = getExecutions(jobMasterGateway, sourceId);
        final ExecutionAttemptID initialAttemptId = executions.get(0).getAttemptId();
        final List<List<InputSplit>> inputSplitsPerTask = new ArrayList<>(parallelism);

        // request all input splits
        for (AccessExecution execution : executions) {
            inputSplitsPerTask.add(getInputSplits(splitsPerTask, getInputSplitSupplier(sourceId, jobMasterGateway, execution.getAttemptId())));
        }

        final List<InputSplit> allRequestedInputSplits = flattenCollection(inputSplitsPerTask);
        assertThat(allRequestedInputSplits, containsInAnyOrder(allInputSplits.toArray(EMPTY_TESTING_INPUT_SPLITS)));

        waitUntilAllExecutionsAreScheduled(jobMasterGateway);

        // fail the first execution to trigger a failover
        jobMasterGateway.updateTaskExecutionState(new TaskExecutionState(inputSplitjobGraph.getJobID(), initialAttemptId, ExecutionState.FAILED)).get();

        // wait until the job has been recovered
        waitUntilAllExecutionsAreScheduled(jobMasterGateway);

        final ExecutionAttemptID restartedAttemptId = getFirstExecution(jobMasterGateway, sourceId).getAttemptId();

        final List<InputSplit> inputSplits = getRemainingInputSplits(getInputSplitSupplier(sourceId, jobMasterGateway, restartedAttemptId));

        assertThat(inputSplits, containsInAnyOrder(expectedRemainingInputSplits.apply(inputSplitsPerTask).toArray(EMPTY_TESTING_INPUT_SPLITS)));
    } finally {
        RpcUtils.terminateRpcEndpoint(jobMaster, testingTimeout);
    }
}
Example 19
Source File: ArchivedExecutionGraphTest.java From flink with Apache License 2.0 | 4 votes |
@BeforeClass
public static void setupExecutionGraph() throws Exception {
    // ---------------------------------------------------------------------------------------------
    // Setup
    // ---------------------------------------------------------------------------------------------

    JobVertexID v1ID = new JobVertexID();
    JobVertexID v2ID = new JobVertexID();

    JobVertex v1 = new JobVertex("v1", v1ID);
    JobVertex v2 = new JobVertex("v2", v2ID);

    v1.setParallelism(1);
    v2.setParallelism(2);

    v1.setInvokableClass(AbstractInvokable.class);
    v2.setInvokableClass(AbstractInvokable.class);

    JobGraph jobGraph = new JobGraph(v1, v2);

    ExecutionConfig config = new ExecutionConfig();

    config.setExecutionMode(ExecutionMode.BATCH_FORCED);
    config.setRestartStrategy(new RestartStrategies.NoRestartStrategyConfiguration());
    config.setParallelism(4);
    config.enableObjectReuse();
    config.setGlobalJobParameters(new TestJobParameters());

    jobGraph.setExecutionConfig(config);

    runtimeGraph = TestingExecutionGraphBuilder
        .newBuilder()
        .setJobGraph(jobGraph)
        .build();

    runtimeGraph.start(ComponentMainThreadExecutorServiceAdapter.forMainThread());

    List<ExecutionJobVertex> jobVertices = new ArrayList<>();
    jobVertices.add(runtimeGraph.getJobVertex(v1ID));
    jobVertices.add(runtimeGraph.getJobVertex(v2ID));

    CheckpointStatsTracker statsTracker = new CheckpointStatsTracker(
        0,
        jobVertices,
        mock(CheckpointCoordinatorConfiguration.class),
        new UnregisteredMetricsGroup());

    CheckpointCoordinatorConfiguration chkConfig = new CheckpointCoordinatorConfiguration(
        100,
        100,
        100,
        1,
        CheckpointRetentionPolicy.NEVER_RETAIN_AFTER_TERMINATION,
        true,
        false,
        false,
        0);

    runtimeGraph.enableCheckpointing(
        chkConfig,
        Collections.<ExecutionJobVertex>emptyList(),
        Collections.<ExecutionJobVertex>emptyList(),
        Collections.<ExecutionJobVertex>emptyList(),
        Collections.<MasterTriggerRestoreHook<?>>emptyList(),
        new StandaloneCheckpointIDCounter(),
        new StandaloneCompletedCheckpointStore(1),
        new MemoryStateBackend(),
        statsTracker);

    runtimeGraph.setJsonPlan("{}");

    runtimeGraph.getJobVertex(v2ID).getTaskVertices()[0].getCurrentExecutionAttempt().fail(new RuntimeException("This exception was thrown on purpose."));
}
Example 20
Source File: ExecutionGraphRestartTest.java From Flink-CEPplus with Apache License 2.0 | 4 votes |
/**
 * Tests that a graph is not restarted after cancellation via a call to
 * {@link ExecutionGraph#failGlobal(Throwable)}. This can happen when a slot is
 * released concurrently with cancellation.
 */
@Test
public void testFailExecutionAfterCancel() throws Exception {
    Instance instance = ExecutionGraphTestUtils.getInstance(
        new ActorTaskManagerGateway(
            new SimpleActorGateway(TestingUtils.directExecutionContext())),
        2);

    Scheduler scheduler = new Scheduler(TestingUtils.defaultExecutionContext());
    scheduler.newInstanceAvailable(instance);

    JobVertex vertex = ExecutionGraphTestUtils.createJobVertex("Test Vertex", 1, NoOpInvokable.class);

    ExecutionConfig executionConfig = new ExecutionConfig();
    executionConfig.setRestartStrategy(RestartStrategies.fixedDelayRestart(
        Integer.MAX_VALUE, Integer.MAX_VALUE));
    JobGraph jobGraph = new JobGraph("Test Job", vertex);
    jobGraph.setExecutionConfig(executionConfig);

    ExecutionGraph eg = newExecutionGraph(new InfiniteDelayRestartStrategy(), scheduler);

    eg.attachJobGraph(jobGraph.getVerticesSortedTopologicallyFromSources());

    assertEquals(JobStatus.CREATED, eg.getState());

    eg.scheduleForExecution();
    assertEquals(JobStatus.RUNNING, eg.getState());

    // Fail right after cancel (for example with concurrent slot release)
    eg.cancel();

    for (ExecutionVertex v : eg.getAllExecutionVertices()) {
        v.getCurrentExecutionAttempt().fail(new Exception("Test Exception"));
    }

    assertEquals(JobStatus.CANCELED, eg.getTerminationFuture().get());

    Execution execution = eg.getAllExecutionVertices().iterator().next().getCurrentExecutionAttempt();
    execution.completeCancelling();
    assertEquals(JobStatus.CANCELED, eg.getState());
}