Java Code Examples for org.apache.flink.runtime.jobgraph.JobStatus#valueOf()
The following examples show how to use
org.apache.flink.runtime.jobgraph.JobStatus#valueOf().
You can vote up the examples you like or vote down the ones you don't like,
and you can go to the original project or source file by following the links above each example. You may also check out the related API usage on the sidebar.
Example 1
Source File: JobDetails.java From Flink-CEPplus with Apache License 2.0 | 5 votes |
/**
 * Deserializes a {@code JobDetails} instance from its JSON representation.
 *
 * <p>Reads the job id, name, timing fields and status from the root object, then collects
 * the per-{@code ExecutionState} vertex counts from the nested {@code "tasks"} object,
 * indexed by the state's ordinal.
 *
 * @param jsonParser parser positioned at the JobDetails JSON object
 * @param deserializationContext Jackson context (unused)
 * @return the reconstructed {@code JobDetails}
 * @throws IOException if the underlying JSON cannot be read
 */
@Override
public JobDetails deserialize(
        JsonParser jsonParser,
        DeserializationContext deserializationContext) throws IOException {

    // Materialize the whole object first so fields can be looked up by name.
    JsonNode rootNode = jsonParser.readValueAsTree();

    JobID jobId = JobID.fromHexString(rootNode.get(FIELD_NAME_JOB_ID).textValue());
    String jobName = rootNode.get(FIELD_NAME_JOB_NAME).textValue();
    long startTime = rootNode.get(FIELD_NAME_START_TIME).longValue();
    long endTime = rootNode.get(FIELD_NAME_END_TIME).longValue();
    long duration = rootNode.get(FIELD_NAME_DURATION).longValue();
    JobStatus jobStatus = JobStatus.valueOf(rootNode.get(FIELD_NAME_STATUS).textValue());
    long lastUpdateTime = rootNode.get(FIELD_NAME_LAST_MODIFICATION).longValue();

    JsonNode tasksNode = rootNode.get("tasks");
    int numTasks = tasksNode.get(FIELD_NAME_TOTAL_NUMBER_TASKS).intValue();

    int[] numVerticesPerExecutionState = new int[ExecutionState.values().length];
    for (ExecutionState executionState : ExecutionState.values()) {
        // Use Locale.ROOT: the plain toLowerCase() is default-locale-sensitive
        // (e.g. under a Turkish locale "RUNNING" lower-cases with a dotless 'ı'),
        // which would break the field lookup for locale-independent JSON keys.
        numVerticesPerExecutionState[executionState.ordinal()] =
                tasksNode.get(executionState.name().toLowerCase(java.util.Locale.ROOT)).intValue();
    }

    return new JobDetails(
            jobId,
            jobName,
            startTime,
            endTime,
            duration,
            jobStatus,
            lastUpdateTime,
            numVerticesPerExecutionState,
            numTasks);
}
Example 2
Source File: JobDetails.java From flink with Apache License 2.0 | 5 votes |
/**
 * Deserializes a {@code JobDetails} instance from its JSON representation.
 *
 * <p>Reads the job id, name, timing fields and status from the root object, then collects
 * the per-{@code ExecutionState} vertex counts from the nested {@code "tasks"} object,
 * indexed by the state's ordinal.
 *
 * @param jsonParser parser positioned at the JobDetails JSON object
 * @param deserializationContext Jackson context (unused)
 * @return the reconstructed {@code JobDetails}
 * @throws IOException if the underlying JSON cannot be read
 */
@Override
public JobDetails deserialize(
        JsonParser jsonParser,
        DeserializationContext deserializationContext) throws IOException {

    // Materialize the whole object first so fields can be looked up by name.
    JsonNode rootNode = jsonParser.readValueAsTree();

    JobID jobId = JobID.fromHexString(rootNode.get(FIELD_NAME_JOB_ID).textValue());
    String jobName = rootNode.get(FIELD_NAME_JOB_NAME).textValue();
    long startTime = rootNode.get(FIELD_NAME_START_TIME).longValue();
    long endTime = rootNode.get(FIELD_NAME_END_TIME).longValue();
    long duration = rootNode.get(FIELD_NAME_DURATION).longValue();
    JobStatus jobStatus = JobStatus.valueOf(rootNode.get(FIELD_NAME_STATUS).textValue());
    long lastUpdateTime = rootNode.get(FIELD_NAME_LAST_MODIFICATION).longValue();

    JsonNode tasksNode = rootNode.get("tasks");
    int numTasks = tasksNode.get(FIELD_NAME_TOTAL_NUMBER_TASKS).intValue();

    int[] numVerticesPerExecutionState = new int[ExecutionState.values().length];
    for (ExecutionState executionState : ExecutionState.values()) {
        // Use Locale.ROOT: the plain toLowerCase() is default-locale-sensitive
        // (e.g. under a Turkish locale "RUNNING" lower-cases with a dotless 'ı'),
        // which would break the field lookup for locale-independent JSON keys.
        numVerticesPerExecutionState[executionState.ordinal()] =
                tasksNode.get(executionState.name().toLowerCase(java.util.Locale.ROOT)).intValue();
    }

    return new JobDetails(
            jobId,
            jobName,
            startTime,
            endTime,
            duration,
            jobStatus,
            lastUpdateTime,
            numVerticesPerExecutionState,
            numTasks);
}
Example 3
Source File: HistoryServerArchiveFetcher.java From Flink-CEPplus with Apache License 2.0 | 4 votes |
/**
 * Converts a pre-1.5 legacy job-overview JSON document into the current
 * {@code MultipleJobsDetails} JSON representation.
 *
 * @param legacyOverview the legacy overview JSON as a string
 * @return the re-serialized overview in the current format
 * @throws IOException if the input cannot be parsed or the output cannot be written
 */
private static String convertLegacyJobOverview(String legacyOverview) throws IOException {
    // A legacy overview archive carries its (single) job under "finished".
    JsonNode job = mapper.readTree(legacyOverview).get("finished").get(0);

    JobID jobId = JobID.fromHexString(job.get("jid").asText());
    String name = job.get("name").asText();
    JobStatus state = JobStatus.valueOf(job.get("state").asText());
    long startTime = job.get("start-time").asLong();
    long endTime = job.get("end-time").asLong();
    long duration = job.get("duration").asLong();
    long lastMod = job.get("last-modification").asLong();

    JsonNode tasks = job.get("tasks");
    int numTasks = tasks.get("total").asInt();

    // Array is zero-initialized; fill only the states the archive reports.
    int[] tasksPerState = new int[ExecutionState.values().length];

    JsonNode pendingNode = tasks.get("pending");
    if (pendingNode != null) {
        // Flink < 1.4 lumped CREATED/SCHEDULED/DEPLOYING into a single "pending"
        // bucket; to keep the overall task count correct we attribute it all
        // to SCHEDULED (CREATED and DEPLOYING stay 0).
        tasksPerState[ExecutionState.SCHEDULED.ordinal()] = pendingNode.asInt();
    } else {
        // Flink >= 1.4 reports the three formerly-pending states separately.
        tasksPerState[ExecutionState.CREATED.ordinal()] = tasks.get("created").asInt();
        tasksPerState[ExecutionState.SCHEDULED.ordinal()] = tasks.get("scheduled").asInt();
        tasksPerState[ExecutionState.DEPLOYING.ordinal()] = tasks.get("deploying").asInt();
    }
    tasksPerState[ExecutionState.RUNNING.ordinal()] = tasks.get("running").asInt();
    tasksPerState[ExecutionState.FINISHED.ordinal()] = tasks.get("finished").asInt();
    tasksPerState[ExecutionState.CANCELING.ordinal()] = tasks.get("canceling").asInt();
    tasksPerState[ExecutionState.CANCELED.ordinal()] = tasks.get("canceled").asInt();
    tasksPerState[ExecutionState.FAILED.ordinal()] = tasks.get("failed").asInt();

    JobDetails jobDetails = new JobDetails(
            jobId, name, startTime, endTime, duration, state, lastMod, tasksPerState, numTasks);

    StringWriter sw = new StringWriter();
    mapper.writeValue(sw, new MultipleJobsDetails(Collections.singleton(jobDetails)));
    return sw.toString();
}
Example 4
Source File: HistoryServerArchiveFetcher.java From flink with Apache License 2.0 | 4 votes |
/**
 * Converts a pre-1.5 legacy job-overview JSON document into the current
 * {@code MultipleJobsDetails} JSON representation.
 *
 * @param legacyOverview the legacy overview JSON as a string
 * @return the re-serialized overview in the current format
 * @throws IOException if the input cannot be parsed or the output cannot be written
 */
private static String convertLegacyJobOverview(String legacyOverview) throws IOException {
    // A legacy overview archive carries its (single) job under "finished".
    JsonNode job = mapper.readTree(legacyOverview).get("finished").get(0);

    JobID jobId = JobID.fromHexString(job.get("jid").asText());
    String name = job.get("name").asText();
    JobStatus state = JobStatus.valueOf(job.get("state").asText());
    long startTime = job.get("start-time").asLong();
    long endTime = job.get("end-time").asLong();
    long duration = job.get("duration").asLong();
    long lastMod = job.get("last-modification").asLong();

    JsonNode tasks = job.get("tasks");
    int numTasks = tasks.get("total").asInt();

    // Array is zero-initialized; fill only the states the archive reports.
    int[] tasksPerState = new int[ExecutionState.values().length];

    JsonNode pendingNode = tasks.get("pending");
    if (pendingNode != null) {
        // Flink < 1.4 lumped CREATED/SCHEDULED/DEPLOYING into a single "pending"
        // bucket; to keep the overall task count correct we attribute it all
        // to SCHEDULED (CREATED and DEPLOYING stay 0).
        tasksPerState[ExecutionState.SCHEDULED.ordinal()] = pendingNode.asInt();
    } else {
        // Flink >= 1.4 reports the three formerly-pending states separately.
        tasksPerState[ExecutionState.CREATED.ordinal()] = tasks.get("created").asInt();
        tasksPerState[ExecutionState.SCHEDULED.ordinal()] = tasks.get("scheduled").asInt();
        tasksPerState[ExecutionState.DEPLOYING.ordinal()] = tasks.get("deploying").asInt();
    }
    tasksPerState[ExecutionState.RUNNING.ordinal()] = tasks.get("running").asInt();
    tasksPerState[ExecutionState.FINISHED.ordinal()] = tasks.get("finished").asInt();
    tasksPerState[ExecutionState.CANCELING.ordinal()] = tasks.get("canceling").asInt();
    tasksPerState[ExecutionState.CANCELED.ordinal()] = tasks.get("canceled").asInt();
    tasksPerState[ExecutionState.FAILED.ordinal()] = tasks.get("failed").asInt();

    JobDetails jobDetails = new JobDetails(
            jobId, name, startTime, endTime, duration, state, lastMod, tasksPerState, numTasks);

    StringWriter sw = new StringWriter();
    mapper.writeValue(sw, new MultipleJobsDetails(Collections.singleton(jobDetails)));
    return sw.toString();
}