Java Code Examples for org.apache.flink.client.program.ClusterClient#getJobStatus()
The following examples show how to use
org.apache.flink.client.program.ClusterClient#getJobStatus().
You can vote up the examples you like or vote down the ones you don't like.
To view the original project or source file, follow the links above each example. You may check out the related API usage on the sidebar.
Example 1
Source File: AbstractFlinkClient.java From alchemy with Apache License 2.0 | 5 votes |
public JobStatusResponse status(ClusterClient clusterClient, JobStatusRequest request) throws Exception { if (StringUtils.isEmpty(request.getJobID())) { return new JobStatusResponse("the job is not submit yet"); } CompletableFuture<JobStatus> jobStatusCompletableFuture = clusterClient.getJobStatus(JobID.fromHexString(request.getJobID())); // jobStatusCompletableFuture. switch (jobStatusCompletableFuture.get()) { case CREATED: return new JobStatusResponse(true, com.dfire.platform.alchemy.domain.enumeration.JobStatus.SUBMIT); case RESTARTING: break; case RUNNING: return new JobStatusResponse(true, com.dfire.platform.alchemy.domain.enumeration.JobStatus.RUNNING); case FAILING: case FAILED: return new JobStatusResponse(true, com.dfire.platform.alchemy.domain.enumeration.JobStatus.FAILED); case CANCELLING: case CANCELED: return new JobStatusResponse(true, com.dfire.platform.alchemy.domain.enumeration.JobStatus.CANCELED); case FINISHED: return new JobStatusResponse(true, com.dfire.platform.alchemy.domain.enumeration.JobStatus.FINISHED); case SUSPENDED: case RECONCILING: default: // nothing to do } return new JobStatusResponse(null); }
Example 2
Source File: SavepointMigrationTestBase.java From Flink-CEPplus with Apache License 2.0 | 4 votes |
@SafeVarargs protected final void restoreAndExecute( StreamExecutionEnvironment env, String savepointPath, Tuple2<String, Integer>... expectedAccumulators) throws Exception { ClusterClient<?> client = miniClusterResource.getClusterClient(); client.setDetached(true); // Submit the job JobGraph jobGraph = env.getStreamGraph().getJobGraph(); jobGraph.setSavepointRestoreSettings(SavepointRestoreSettings.forPath(savepointPath)); JobSubmissionResult jobSubmissionResult = client.submitJob(jobGraph, SavepointMigrationTestBase.class.getClassLoader()); boolean done = false; while (DEADLINE.hasTimeLeft()) { // try and get a job result, this will fail if the job already failed. Use this // to get out of this loop JobID jobId = jobSubmissionResult.getJobID(); try { CompletableFuture<JobStatus> jobStatusFuture = client.getJobStatus(jobSubmissionResult.getJobID()); JobStatus jobStatus = jobStatusFuture.get(5, TimeUnit.SECONDS); assertNotEquals(JobStatus.FAILED, jobStatus); } catch (Exception e) { fail("Could not connect to job: " + e); } Thread.sleep(100); Map<String, OptionalFailure<Object>> accumulators = client.getAccumulators(jobId); boolean allDone = true; for (Tuple2<String, Integer> acc : expectedAccumulators) { OptionalFailure<Object> numFinished = accumulators.get(acc.f0); if (numFinished == null) { allDone = false; break; } if (!numFinished.get().equals(acc.f1)) { allDone = false; break; } } if (allDone) { done = true; break; } } if (!done) { fail("Did not see the expected accumulator results within time limit."); } }
Example 3
Source File: SavepointMigrationTestBase.java From flink with Apache License 2.0 | 4 votes |
@SafeVarargs protected final void restoreAndExecute( StreamExecutionEnvironment env, String savepointPath, Tuple2<String, Integer>... expectedAccumulators) throws Exception { ClusterClient<?> client = miniClusterResource.getClusterClient(); client.setDetached(true); // Submit the job JobGraph jobGraph = env.getStreamGraph().getJobGraph(); jobGraph.setSavepointRestoreSettings(SavepointRestoreSettings.forPath(savepointPath)); JobSubmissionResult jobSubmissionResult = client.submitJob(jobGraph, SavepointMigrationTestBase.class.getClassLoader()); boolean done = false; while (DEADLINE.hasTimeLeft()) { // try and get a job result, this will fail if the job already failed. Use this // to get out of this loop JobID jobId = jobSubmissionResult.getJobID(); try { CompletableFuture<JobStatus> jobStatusFuture = client.getJobStatus(jobSubmissionResult.getJobID()); JobStatus jobStatus = jobStatusFuture.get(5, TimeUnit.SECONDS); assertNotEquals(JobStatus.FAILED, jobStatus); } catch (Exception e) { fail("Could not connect to job: " + e); } Thread.sleep(100); Map<String, OptionalFailure<Object>> accumulators = client.getAccumulators(jobId); boolean allDone = true; for (Tuple2<String, Integer> acc : expectedAccumulators) { OptionalFailure<Object> numFinished = accumulators.get(acc.f0); if (numFinished == null) { allDone = false; break; } if (!numFinished.get().equals(acc.f1)) { allDone = false; break; } } if (allDone) { done = true; break; } } if (!done) { fail("Did not see the expected accumulator results within time limit."); } }
Example 4
Source File: SavepointMigrationTestBase.java From flink with Apache License 2.0 | 4 votes |
@SafeVarargs protected final void restoreAndExecute( StreamExecutionEnvironment env, String savepointPath, Tuple2<String, Integer>... expectedAccumulators) throws Exception { final Deadline deadLine = Deadline.fromNow(Duration.ofMinutes(5)); ClusterClient<?> client = miniClusterResource.getClusterClient(); // Submit the job JobGraph jobGraph = env.getStreamGraph().getJobGraph(); jobGraph.setSavepointRestoreSettings(SavepointRestoreSettings.forPath(savepointPath)); JobSubmissionResult jobSubmissionResult = ClientUtils.submitJob(client, jobGraph); boolean done = false; while (deadLine.hasTimeLeft()) { // try and get a job result, this will fail if the job already failed. Use this // to get out of this loop JobID jobId = jobSubmissionResult.getJobID(); try { CompletableFuture<JobStatus> jobStatusFuture = client.getJobStatus(jobSubmissionResult.getJobID()); JobStatus jobStatus = jobStatusFuture.get(5, TimeUnit.SECONDS); assertNotEquals(JobStatus.FAILED, jobStatus); } catch (Exception e) { fail("Could not connect to job: " + e); } Thread.sleep(100); Map<String, Object> accumulators = client.getAccumulators(jobId).get(); boolean allDone = true; for (Tuple2<String, Integer> acc : expectedAccumulators) { Object numFinished = accumulators.get(acc.f0); if (numFinished == null) { allDone = false; break; } if (!numFinished.equals(acc.f1)) { allDone = false; break; } } if (allDone) { done = true; break; } } if (!done) { fail("Did not see the expected accumulator results within time limit."); } }