org.apache.flink.runtime.client.JobExecutionException Java Examples
The following examples show how to use
org.apache.flink.runtime.client.JobExecutionException.
Each example is taken from an open source project; the source file, originating project, and license are listed above the code.
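Most of the examples below follow the same pattern: submit a job, catch the JobExecutionException thrown by execute(), and walk its cause chain to find the real failure. The following is a minimal sketch of that pattern; the pipeline, the job name, and the class name JobExecutionExceptionSketch are illustrative placeholders, not taken from any of the projects listed here.

import org.apache.flink.api.java.ExecutionEnvironment;
import org.apache.flink.api.java.io.DiscardingOutputFormat;
import org.apache.flink.runtime.client.JobExecutionException;

public class JobExecutionExceptionSketch {

    public static void main(String[] args) throws Exception {
        ExecutionEnvironment env = ExecutionEnvironment.getExecutionEnvironment();

        // A trivial placeholder pipeline; in the examples below this is the job under test.
        env.generateSequence(0, 100).output(new DiscardingOutputFormat<>());

        try {
            env.execute("sketch job");
        } catch (JobExecutionException e) {
            // JobExecutionException wraps the actual failure; unwrap the cause chain
            // (bounded, to guard against cycles) the way several examples below do.
            Throwable cause = e.getCause();
            int depth = 0;
            while (cause != null && depth++ < 20) {
                System.err.println("caused by: " + cause);
                cause = cause.getCause();
            }
        }
    }
}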
Example #1
Source File: StateAssignmentOperationTest.java From flink with Apache License 2.0
/**
 * Check that channel and operator states are assigned to the same tasks on recovery.
 */
@Test
public void testChannelStateAssignmentStability() throws JobException, JobExecutionException {
    int numOperators = 10; // note: each operator is placed into a separate vertex
    int numSubTasks = 100;

    Set<OperatorID> operatorIds = buildOperatorIds(numOperators);
    Map<OperatorID, OperatorState> states = buildOperatorStates(operatorIds, numSubTasks);
    Map<OperatorID, ExecutionJobVertex> vertices = buildVertices(operatorIds, numSubTasks);

    new StateAssignmentOperation(0, new HashSet<>(vertices.values()), states, false).assignStates();

    for (OperatorID operatorId : operatorIds) {
        for (int subtaskIdx = 0; subtaskIdx < numSubTasks; subtaskIdx++) {
            Assert.assertEquals(
                states.get(operatorId).getState(subtaskIdx),
                getAssignedState(vertices.get(operatorId), operatorId, subtaskIdx));
        }
    }
}
Example #2
Source File: SuccessAfterNetworkBuffersFailureITCase.java From Flink-CEPplus with Apache License 2.0
@Test
public void testSuccessfulProgramAfterFailure() throws Exception {
    ExecutionEnvironment env = ExecutionEnvironment.getExecutionEnvironment();

    runConnectedComponents(env);

    try {
        runKMeans(env);
        fail("This program execution should have failed.");
    } catch (JobExecutionException e) {
        assertTrue(e.getCause().getMessage().contains("Insufficient number of network buffers"));
    }

    runConnectedComponents(env);
}
Example #3
Source File: KafkaTestBase.java From flink with Apache License 2.0
protected static void tryExecutePropagateExceptions(StreamExecutionEnvironment see, String name) throws Exception {
    try {
        see.execute(name);
    } catch (ProgramInvocationException | JobExecutionException root) {
        Throwable cause = root.getCause();

        // search for nested SuccessExceptions
        int depth = 0;
        while (!(cause instanceof SuccessException)) {
            if (cause == null || depth++ == 20) {
                throw root;
            } else {
                cause = cause.getCause();
            }
        }
    }
}
Example #4
Source File: AccumulatorErrorITCase.java From Flink-CEPplus with Apache License 2.0
@Test
public void testInvalidTypeAccumulator() throws Exception {
    ExecutionEnvironment env = ExecutionEnvironment.getExecutionEnvironment();
    env.getConfig().disableSysoutLogging();

    // Test Exception forwarding with faulty Accumulator implementation
    env.generateSequence(0, 10000)
        .map(new IncompatibleAccumulatorTypesMapper())
        .map(new IncompatibleAccumulatorTypesMapper2())
        .output(new DiscardingOutputFormat<>());

    try {
        env.execute();
        fail("Should have failed.");
    } catch (JobExecutionException e) {
        assertTrue("Root cause should be:", e.getCause() instanceof Exception);
        assertTrue("Root cause should be:", e.getCause().getCause() instanceof UnsupportedOperationException);
    }
}
Example #5
Source File: StreamTaskTimerITCase.java From flink with Apache License 2.0
/**
 * Note: this test fails if we don't check for exceptions in the source contexts and do not
 * synchronize in the source contexts.
 */
@Test
public void testOperatorChainedToSource() throws Exception {
    StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
    env.setStreamTimeCharacteristic(timeCharacteristic);
    env.setParallelism(1);

    DataStream<String> source = env.addSource(new InfiniteTestSource());

    source.transform(
        "Custom Operator",
        BasicTypeInfo.STRING_TYPE_INFO,
        new TimerOperator(ChainingStrategy.ALWAYS));

    try {
        env.execute("Timer test");
    } catch (JobExecutionException e) {
        verifyJobExecutionException(e);
    }
}
Example #6
Source File: TestUtils.java From Flink-CEPplus with Apache License 2.0
public static JobExecutionResult tryExecute(StreamExecutionEnvironment see, String name) throws Exception {
    try {
        return see.execute(name);
    } catch (ProgramInvocationException | JobExecutionException root) {
        Throwable cause = root.getCause();

        // search for nested SuccessExceptions
        int depth = 0;
        while (!(cause instanceof SuccessException)) {
            if (cause == null || depth++ == 20) {
                root.printStackTrace();
                fail("Test failed: " + root.getMessage());
            } else {
                cause = cause.getCause();
            }
        }
    }

    return null;
}
Example #7
Source File: ElasticsearchSinkTestBase.java From Flink-CEPplus with Apache License 2.0
/**
 * Tests whether the Elasticsearch sink fails when there is no cluster to connect to.
 */
public void runInvalidElasticsearchClusterTest() throws Exception {
    final StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();

    DataStreamSource<Tuple2<Integer, String>> source = env.addSource(new SourceSinkDataTestKit.TestDataSourceFunction());

    Map<String, String> userConfig = new HashMap<>();
    userConfig.put(ElasticsearchSinkBase.CONFIG_KEY_BULK_FLUSH_MAX_ACTIONS, "1");
    userConfig.put("cluster.name", "invalid-cluster-name");

    source.addSink(createElasticsearchSinkForNode(
        1,
        "invalid-cluster-name",
        new SourceSinkDataTestKit.TestElasticsearchSinkFunction("test"),
        "123.123.123.123")); // incorrect ip address

    try {
        env.execute("Elasticsearch Sink Test");
    } catch (JobExecutionException expectedException) {
        // test passes
        return;
    }

    fail();
}
Example #8
Source File: StreamTaskTimerITCase.java From flink with Apache License 2.0
@Test
public void testTwoInputOperatorWithoutChaining() throws Exception {
    StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
    env.setStreamTimeCharacteristic(timeCharacteristic);
    env.setParallelism(1);

    DataStream<String> source = env.addSource(new InfiniteTestSource());

    source.connect(source).transform(
        "Custom Operator",
        BasicTypeInfo.STRING_TYPE_INFO,
        new TwoInputTimerOperator(ChainingStrategy.NEVER));

    try {
        env.execute("Timer test");
    } catch (JobExecutionException e) {
        verifyJobExecutionException(e);
    }
}
Example #9
Source File: SuccessAfterNetworkBuffersFailureITCase.java From flink with Apache License 2.0
@Test
public void testSuccessfulProgramAfterFailure() throws Exception {
    ExecutionEnvironment env = ExecutionEnvironment.getExecutionEnvironment();

    runConnectedComponents(env);

    try {
        runKMeans(env);
        fail("This program execution should have failed.");
    } catch (JobExecutionException e) {
        assertTrue(findThrowableWithMessage(e, "Insufficient number of network buffers").isPresent());
    }

    runConnectedComponents(env);
}
Example #10
Source File: AccumulatorErrorITCase.java From flink with Apache License 2.0
@Test
public void testInvalidTypeAccumulator() throws Exception {
    ExecutionEnvironment env = ExecutionEnvironment.getExecutionEnvironment();

    // Test Exception forwarding with faulty Accumulator implementation
    env.generateSequence(0, 10000)
        .map(new IncompatibleAccumulatorTypesMapper())
        .map(new IncompatibleAccumulatorTypesMapper2())
        .output(new DiscardingOutputFormat<>());

    try {
        env.execute();
        fail("Should have failed.");
    } catch (JobExecutionException e) {
        assertTrue(findThrowable(e, UnsupportedOperationException.class).isPresent());
    }
}
Example #11
Source File: ElasticsearchSinkTestBase.java From flink with Apache License 2.0
/**
 * Tests whether the Elasticsearch sink fails when there is no cluster to connect to.
 */
public void runInvalidElasticsearchClusterTest() throws Exception {
    final StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();

    DataStreamSource<Tuple2<Integer, String>> source = env.addSource(new SourceSinkDataTestKit.TestDataSourceFunction());

    source.addSink(createElasticsearchSinkForNode(
        1,
        "invalid-cluster-name",
        SourceSinkDataTestKit.getJsonSinkFunction("test"),
        "123.123.123.123")); // incorrect ip address

    try {
        env.execute("Elasticsearch Sink Test");
    } catch (JobExecutionException expectedException) {
        // every ES version throws a different exception in case of timeouts, so don't bother asserting on the exception
        // test passes
        return;
    }

    fail();
}
Example #12
Source File: FlinkUtilsTest.java From flink-crawler with Apache License 2.0
@Test
public void testMakeKeyForOperatorIndex() throws Exception {
    final int parallelism = 2;

    LocalStreamEnvironment env = new LocalStreamEnvironment();
    env.setParallelism(parallelism);
    final int maxParallelism = env.getMaxParallelism();

    DataStreamSource<Tuple2<String, Float>> pages = env.fromElements(
        Tuple2.of("page0", 0.0f),
        Tuple2.of("page0", 1.0f),
        Tuple2.of("page1", 10.0f),
        Tuple2.of("page666", 6660.0f));

    DataStreamSource<Tuple2<String, Float>> epsilon = env.fromElements(
        Tuple2.of(FlinkUtils.makeKeyForOperatorIndex("task:%d", maxParallelism, parallelism, 0), 0.5f),
        Tuple2.of(FlinkUtils.makeKeyForOperatorIndex("task:%d", maxParallelism, parallelism, 1), 0.25f));

    pages.union(epsilon)
        .keyBy(0)
        .process(new MyProcessFunction())
        .print();

    try {
        env.execute();
    } catch (JobExecutionException e) {
        Assert.fail(e.getCause().getMessage());
    }
}
Example #13
Source File: TestUtils.java From flink-benchmarks with Apache License 2.0
public static JobExecutionResult tryExecute(StreamExecutionEnvironment see, String name) throws Exception {
    try {
        return see.execute(name);
    } catch (ProgramInvocationException | JobExecutionException root) {
        Throwable cause = root.getCause();

        // search for nested SuccessExceptions
        int depth = 0;
        while (!(cause instanceof SuccessException)) {
            if (cause == null || depth++ == 20) {
                root.printStackTrace();
                fail("Test failed: " + root.getMessage());
            } else {
                cause = cause.getCause();
            }
        }
    }

    return null;
}
Example #14
Source File: ExecutionGraphTestUtils.java From flink with Apache License 2.0
public ExecutionGraph build() throws JobException, JobExecutionException {
    return ExecutionGraphBuilder.buildGraph(
        null,
        jobGraph,
        jobMasterConfig,
        futureExecutor,
        ioExecutor,
        slotProvider,
        classLoader,
        checkpointRecoveryFactory,
        rpcTimeout,
        restartStrategy,
        metricGroup,
        blobWriter,
        allocationTimeout,
        TEST_LOGGER,
        shuffleMaster,
        partitionTracker,
        failoverStrategyFactory);
}
Example #15
Source File: KafkaTestBase.java From Flink-CEPplus with Apache License 2.0
protected static void tryExecutePropagateExceptions(StreamExecutionEnvironment see, String name) throws Exception {
    try {
        see.execute(name);
    } catch (ProgramInvocationException | JobExecutionException root) {
        Throwable cause = root.getCause();

        // search for nested SuccessExceptions
        int depth = 0;
        while (!(cause instanceof SuccessException)) {
            if (cause == null || depth++ == 20) {
                throw root;
            } else {
                cause = cause.getCause();
            }
        }
    }
}
Example #16
Source File: LegacyScheduler.java From flink with Apache License 2.0
private ExecutionGraph createExecutionGraph(
        JobManagerJobMetricGroup currentJobManagerJobMetricGroup,
        ShuffleMaster<?> shuffleMaster,
        final PartitionTracker partitionTracker) throws JobExecutionException, JobException {
    return ExecutionGraphBuilder.buildGraph(
        null,
        jobGraph,
        jobMasterConfiguration,
        futureExecutor,
        ioExecutor,
        slotProvider,
        userCodeLoader,
        checkpointRecoveryFactory,
        rpcTimeout,
        restartStrategy,
        currentJobManagerJobMetricGroup,
        blobWriter,
        slotRequestTimeout,
        log,
        shuffleMaster,
        partitionTracker);
}
Example #17
Source File: Dispatcher.java From flink with Apache License 2.0
private CompletableFuture<JobManagerRunner> createJobManagerRunner(JobGraph jobGraph) {
    final RpcService rpcService = getRpcService();

    return CompletableFuture.supplyAsync(
        () -> {
            try {
                return jobManagerRunnerFactory.createJobManagerRunner(
                    jobGraph,
                    configuration,
                    rpcService,
                    highAvailabilityServices,
                    heartbeatServices,
                    jobManagerSharedServices,
                    new DefaultJobManagerJobMetricGroupFactory(jobManagerMetricGroup),
                    fatalErrorHandler);
            } catch (Exception e) {
                throw new CompletionException(
                    new JobExecutionException(jobGraph.getJobID(), "Could not instantiate JobManager.", e));
            }
        },
        rpcService.getExecutor());
}
Example #18
Source File: MiniClusterITCase.java From flink with Apache License 2.0
@Test
public void testHandleBatchJobsWhenNotEnoughSlot() throws Exception {
    try {
        setupAndRunHandleJobsWhenNotEnoughSlots(ScheduleMode.LAZY_FROM_SOURCES);
        fail("Job should fail.");
    } catch (JobExecutionException e) {
        assertTrue(findThrowableWithMessage(e, "Job execution failed.").isPresent());
        assertTrue(findThrowable(e, NoResourceAvailableException.class).isPresent());

        //TODO: remove the legacy scheduler message check once legacy scheduler is removed
        final String legacySchedulerErrorMessage = "Could not allocate enough slots";
        final String ngSchedulerErrorMessage = "Could not allocate the required slot within slot request timeout";
        assertTrue(findThrowableWithMessage(e, legacySchedulerErrorMessage).isPresent()
            || findThrowableWithMessage(e, ngSchedulerErrorMessage).isPresent());
    }
}
Example #19
Source File: AccumulatorErrorITCase.java From flink with Apache License 2.0
@Test
public void testInvalidTypeAccumulator() throws Exception {
    ExecutionEnvironment env = ExecutionEnvironment.getExecutionEnvironment();
    env.getConfig().disableSysoutLogging();

    // Test Exception forwarding with faulty Accumulator implementation
    env.generateSequence(0, 10000)
        .map(new IncompatibleAccumulatorTypesMapper())
        .map(new IncompatibleAccumulatorTypesMapper2())
        .output(new DiscardingOutputFormat<>());

    try {
        env.execute();
        fail("Should have failed.");
    } catch (JobExecutionException e) {
        assertTrue("Root cause should be:", e.getCause() instanceof Exception);
        assertTrue("Root cause should be:", e.getCause().getCause() instanceof UnsupportedOperationException);
    }
}
Example #20
Source File: ElasticsearchSinkTestBase.java From flink with Apache License 2.0
/**
 * Tests whether the Elasticsearch sink fails when there is no cluster to connect to.
 */
public void runInvalidElasticsearchClusterTest() throws Exception {
    final StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();

    DataStreamSource<Tuple2<Integer, String>> source = env.addSource(new SourceSinkDataTestKit.TestDataSourceFunction());

    Map<String, String> userConfig = new HashMap<>();
    userConfig.put(ElasticsearchSinkBase.CONFIG_KEY_BULK_FLUSH_MAX_ACTIONS, "1");
    userConfig.put("cluster.name", "invalid-cluster-name");

    source.addSink(createElasticsearchSinkForNode(
        1,
        "invalid-cluster-name",
        new SourceSinkDataTestKit.TestElasticsearchSinkFunction("test"),
        "123.123.123.123")); // incorrect ip address

    try {
        env.execute("Elasticsearch Sink Test");
    } catch (JobExecutionException expectedException) {
        // test passes
        return;
    }

    fail();
}
Example #21
Source File: PipelinedFailoverRegionBuildingTest.java From Flink-CEPplus with Apache License 2.0
private ExecutionGraph createExecutionGraph(JobGraph jobGraph) throws JobException, JobExecutionException {
    // configure the pipelined failover strategy
    final Configuration jobManagerConfig = new Configuration();
    jobManagerConfig.setString(
        JobManagerOptions.EXECUTION_FAILOVER_STRATEGY,
        FailoverStrategyLoader.PIPELINED_REGION_RESTART_STRATEGY_NAME);

    final Time timeout = Time.seconds(10L);
    return ExecutionGraphBuilder.buildGraph(
        null,
        jobGraph,
        jobManagerConfig,
        TestingUtils.defaultExecutor(),
        TestingUtils.defaultExecutor(),
        mock(SlotProvider.class),
        PipelinedFailoverRegionBuildingTest.class.getClassLoader(),
        new StandaloneCheckpointRecoveryFactory(),
        timeout,
        new NoRestartStrategy(),
        new UnregisteredMetricsGroup(),
        1000,
        VoidBlobWriter.getInstance(),
        timeout,
        log);
}
Example #22
Source File: JobResultTest.java From Flink-CEPplus with Apache License 2.0
@Test
public void testFailedJobThrowsJobExecutionException() throws Exception {
    final FlinkException cause = new FlinkException("Test exception");

    final JobResult jobResult = JobResult.createFrom(
        new ArchivedExecutionGraphBuilder()
            .setJobID(new JobID())
            .setState(JobStatus.FAILED)
            .setFailureCause(new ErrorInfo(cause, 42L))
            .build());

    try {
        jobResult.toJobExecutionResult(getClass().getClassLoader());
        fail("Job should fail with JobExecutionException.");
    } catch (JobExecutionException expected) {
        assertThat(expected.getCause(), is(equalTo(cause)));
    }
}
Example #23
Source File: JobResultTest.java From flink with Apache License 2.0
@Test
public void testFailedJobThrowsJobExecutionException() throws Exception {
    final FlinkException cause = new FlinkException("Test exception");

    final JobResult jobResult = JobResult.createFrom(
        new ArchivedExecutionGraphBuilder()
            .setJobID(new JobID())
            .setState(JobStatus.FAILED)
            .setFailureCause(new ErrorInfo(cause, 42L))
            .build());

    try {
        jobResult.toJobExecutionResult(getClass().getClassLoader());
        fail("Job should fail with JobExecutionException.");
    } catch (JobExecutionException expected) {
        assertThat(expected.getCause(), is(equalTo(cause)));
    }
}
Example #24
Source File: PipelinedFailoverRegionBuildingTest.java From flink with Apache License 2.0
private ExecutionGraph createExecutionGraph(JobGraph jobGraph) throws JobException, JobExecutionException {
    // configure the pipelined failover strategy
    final Configuration jobManagerConfig = new Configuration();
    jobManagerConfig.setString(
        JobManagerOptions.EXECUTION_FAILOVER_STRATEGY,
        FailoverStrategyLoader.LEGACY_PIPELINED_REGION_RESTART_STRATEGY_NAME);

    final Time timeout = Time.seconds(10L);
    return ExecutionGraphBuilder.buildGraph(
        null,
        jobGraph,
        jobManagerConfig,
        TestingUtils.defaultExecutor(),
        TestingUtils.defaultExecutor(),
        mock(SlotProvider.class),
        PipelinedFailoverRegionBuildingTest.class.getClassLoader(),
        new StandaloneCheckpointRecoveryFactory(),
        timeout,
        new NoRestartStrategy(),
        new UnregisteredMetricsGroup(),
        VoidBlobWriter.getInstance(),
        timeout,
        log,
        NettyShuffleMaster.INSTANCE,
        NoOpPartitionTracker.INSTANCE);
}
Example #25
Source File: TestingExecutionGraphBuilder.java From flink with Apache License 2.0
public ExecutionGraph build() throws JobException, JobExecutionException {
    return ExecutionGraphBuilder.buildGraph(
        null,
        jobGraph,
        jobMasterConfig,
        futureExecutor,
        ioExecutor,
        slotProvider,
        userClassLoader,
        checkpointRecoveryFactory,
        rpcTimeout,
        restartStrategy,
        metricGroup,
        blobWriter,
        allocationTimeout,
        LOG,
        shuffleMaster,
        partitionTracker,
        failoverStrategyFactory);
}
Example #26
Source File: MiniClusterITCase.java From flink with Apache License 2.0
@Test
public void testJobWithAFailingReceiverVertex() throws Exception {
    final int parallelism = 11;

    final MiniClusterConfiguration cfg = new MiniClusterConfiguration.Builder()
        .setNumTaskManagers(1)
        .setNumSlotsPerTaskManager(parallelism)
        .setConfiguration(getDefaultConfiguration())
        .build();

    try (final MiniCluster miniCluster = new MiniCluster(cfg)) {
        miniCluster.start();

        final JobVertex sender = new JobVertex("Sender");
        sender.setInvokableClass(Sender.class);
        sender.setParallelism(parallelism);

        final JobVertex receiver = new JobVertex("Receiver");
        receiver.setInvokableClass(ExceptionReceiver.class);
        receiver.setParallelism(parallelism);

        receiver.connectNewDataSetAsInput(sender, DistributionPattern.POINTWISE,
            ResultPartitionType.PIPELINED);

        final JobGraph jobGraph = new JobGraph("Pointwise Job", sender, receiver);

        try {
            miniCluster.executeJobBlocking(jobGraph);
            fail("Job should fail.");
        } catch (JobExecutionException e) {
            assertTrue(findThrowable(e, Exception.class).isPresent());
            assertTrue(findThrowableWithMessage(e, "Test exception").isPresent());
        }
    }
}
Example #27
Source File: SimpleRecoveryITCaseBase.java From flink with Apache License 2.0
private void executeAndRunAssertions(ExecutionEnvironment env) throws Exception {
    try {
        JobExecutionResult result = env.execute();
        assertTrue(result.getNetRuntime() >= 0);
        assertNotNull(result.getAllAccumulatorResults());
        assertTrue(result.getAllAccumulatorResults().isEmpty());
    } catch (JobExecutionException e) {
        fail("The program should have succeeded on the second run");
    }
}
Example #28
Source File: MiniClusterITCase.java From flink with Apache License 2.0
@Test
public void testHandleStreamingJobsWhenNotEnoughSlot() throws Exception {
    try {
        setupAndRunHandleJobsWhenNotEnoughSlots(ScheduleMode.EAGER);
        fail("Job should fail.");
    } catch (JobExecutionException e) {
        assertTrue(findThrowableWithMessage(e, "Job execution failed.").isPresent());
        assertTrue(findThrowable(e, NoResourceAvailableException.class).isPresent());
        assertTrue(findThrowableWithMessage(e, "Slots required: 2, slots allocated: 1").isPresent());
    }
}
Example #29
Source File: MiniCluster.java From flink with Apache License 2.0
/**
 * This method executes a job in detached mode. The method returns immediately after the job
 * has been added to the
 *
 * @param job The Flink job to execute
 *
 * @throws JobExecutionException Thrown if anything went amiss during initial job launch,
 *         or if the job terminally failed.
 */
public void runDetached(JobGraph job) throws JobExecutionException, InterruptedException {
    checkNotNull(job, "job is null");

    final CompletableFuture<JobSubmissionResult> submissionFuture = submitJob(job);

    try {
        submissionFuture.get();
    } catch (ExecutionException e) {
        throw new JobExecutionException(job.getJobID(), ExceptionUtils.stripExecutionException(e));
    }
}