Java Code Examples for org.apache.hadoop.mapreduce.jobhistory.JobHistoryParser.JobInfo#getAllTasks()
The following examples show how to use
org.apache.hadoop.mapreduce.jobhistory.JobHistoryParser.JobInfo#getAllTasks().
Each example notes the original project and source file it was taken from.
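Before the examples, here is a minimal sketch of the typical call sequence, assuming you already have a job history (.jhist) file on the file system; the class name and file path below are hypothetical:

import java.io.IOException;
import java.util.Map;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.mapreduce.TaskID;
import org.apache.hadoop.mapreduce.jobhistory.JobHistoryParser;
import org.apache.hadoop.mapreduce.jobhistory.JobHistoryParser.JobInfo;
import org.apache.hadoop.mapreduce.jobhistory.JobHistoryParser.TaskInfo;

public class ListJobTasks {
  public static void main(String[] args) throws IOException {
    Configuration conf = new Configuration();
    FileSystem fs = FileSystem.get(conf);
    // Hypothetical history file location; point this at a real .jhist file.
    Path historyFile = new Path("/tmp/job_1400251964517_0001.jhist");
    JobHistoryParser parser = new JobHistoryParser(fs, historyFile);
    JobInfo jobInfo = parser.parse();
    // getAllTasks() returns one TaskInfo per task, keyed by TaskID.
    Map<TaskID, TaskInfo> tasks = jobInfo.getAllTasks();
    for (TaskInfo task : tasks.values()) {
      System.out.println(task.getTaskId() + " -> " + task.getTaskStatus());
    }
  }
}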
Example 1
Source File: TestJobHistoryParsing.java From hadoop with Apache License 2.0
private long computeFinishedMaps(JobInfo jobInfo, int numMaps,
    int numSuccessfulMaps) {
  if (numMaps == numSuccessfulMaps) {
    return jobInfo.getFinishedMaps();
  }

  long numFinishedMaps = 0;
  Map<org.apache.hadoop.mapreduce.TaskID, TaskInfo> taskInfos = jobInfo
      .getAllTasks();
  for (TaskInfo taskInfo : taskInfos.values()) {
    if (TaskState.SUCCEEDED.toString().equals(taskInfo.getTaskStatus())) {
      ++numFinishedMaps;
    }
  }
  return numFinishedMaps;
}
Example 2
Source File: HistoryViewer.java From hadoop with Apache License 2.0
/** Apply the filter (status) on the parsed job and generate summary */
public FilteredJob(JobInfo job, String status) {

  filter = status;

  Map<TaskID, JobHistoryParser.TaskInfo> tasks = job.getAllTasks();

  for (JobHistoryParser.TaskInfo task : tasks.values()) {
    Map<TaskAttemptID, JobHistoryParser.TaskAttemptInfo> attempts =
      task.getAllTaskAttempts();
    for (JobHistoryParser.TaskAttemptInfo attempt : attempts.values()) {
      if (attempt.getTaskStatus().equals(status)) {
        String hostname = attempt.getHostname();
        TaskID id = attempt.getAttemptId().getTaskID();

        Set<TaskID> set = badNodesToFilteredTasks.get(hostname);

        if (set == null) {
          set = new TreeSet<TaskID>();
          set.add(id);
          badNodesToFilteredTasks.put(hostname, set);
        } else {
          set.add(id);
        }
      }
    }
  }
}
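A side note on the grouping above: the get-then-put null check predates Java 8. On Java 8 or later, the same hostname-to-tasks grouping can be written with Map.computeIfAbsent. A small hypothetical helper (not part of HistoryViewer) sketching the equivalent pattern:

import java.util.Map;
import java.util.Set;
import java.util.TreeSet;

import org.apache.hadoop.mapreduce.TaskID;

class FilterHelper {
  /** Group a filtered task id under its host, creating the set lazily. */
  static void addFilteredTask(Map<String, Set<TaskID>> badNodesToFilteredTasks,
      String hostname, TaskID id) {
    badNodesToFilteredTasks
        .computeIfAbsent(hostname, k -> new TreeSet<TaskID>())
        .add(id);
  }
}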
Example 3
Source File: MRAppMaster.java From hadoop with Apache License 2.0
private void parsePreviousJobHistory() throws IOException {
  FSDataInputStream in = getPreviousJobHistoryStream(getConfig(),
      appAttemptID);
  JobHistoryParser parser = new JobHistoryParser(in);
  JobInfo jobInfo = parser.parse();
  Exception parseException = parser.getParseException();
  if (parseException != null) {
    LOG.info("Got an error parsing job-history file"
        + ", ignoring incomplete events.", parseException);
  }
  Map<org.apache.hadoop.mapreduce.TaskID, TaskInfo> taskInfos = jobInfo
      .getAllTasks();
  for (TaskInfo taskInfo : taskInfos.values()) {
    if (TaskState.SUCCEEDED.toString().equals(taskInfo.getTaskStatus())) {
      Iterator<Entry<TaskAttemptID, TaskAttemptInfo>> taskAttemptIterator =
          taskInfo.getAllTaskAttempts().entrySet().iterator();
      while (taskAttemptIterator.hasNext()) {
        Map.Entry<TaskAttemptID, TaskAttemptInfo> currentEntry =
            taskAttemptIterator.next();
        if (!jobInfo.getAllCompletedTaskAttempts()
            .containsKey(currentEntry.getKey())) {
          taskAttemptIterator.remove();
        }
      }
      completedTasksFromPreviousRun
          .put(TypeConverter.toYarn(taskInfo.getTaskId()), taskInfo);
      LOG.info("Read from history task "
          + TypeConverter.toYarn(taskInfo.getTaskId()));
    }
  }

  LOG.info("Read completed tasks from history "
      + completedTasksFromPreviousRun.size());
  recoveredJobStartTime = jobInfo.getLaunchTime();

  // recover AMInfos
  List<JobHistoryParser.AMInfo> jhAmInfoList = jobInfo.getAMInfos();
  if (jhAmInfoList != null) {
    for (JobHistoryParser.AMInfo jhAmInfo : jhAmInfoList) {
      AMInfo amInfo = MRBuilderUtils.newAMInfo(jhAmInfo.getAppAttemptId(),
          jhAmInfo.getStartTime(), jhAmInfo.getContainerId(),
          jhAmInfo.getNodeManagerHost(), jhAmInfo.getNodeManagerPort(),
          jhAmInfo.getNodeManagerHttpPort());
      amInfos.add(amInfo);
    }
  }
}
Example 4
Source File: HistoryViewer.java From hadoop with Apache License 2.0
/** Create summary information for the parsed job */
public SummarizedJob(JobInfo job) {
  tasks = job.getAllTasks();

  for (JobHistoryParser.TaskInfo task : tasks.values()) {
    Map<TaskAttemptID, JobHistoryParser.TaskAttemptInfo> attempts =
      task.getAllTaskAttempts();
    //allHosts.put(task.getHo(Keys.HOSTNAME), "");
    for (JobHistoryParser.TaskAttemptInfo attempt : attempts.values()) {
      long startTime = attempt.getStartTime();
      long finishTime = attempt.getFinishTime();
      if (attempt.getTaskType().equals(TaskType.MAP)) {
        if (mapStarted == 0 || mapStarted > startTime) {
          mapStarted = startTime;
        }
        if (mapFinished < finishTime) {
          mapFinished = finishTime;
        }
        totalMaps++;
        if (attempt.getTaskStatus().equals(
            TaskStatus.State.FAILED.toString())) {
          numFailedMaps++;
        } else if (attempt.getTaskStatus().equals(
            TaskStatus.State.KILLED.toString())) {
          numKilledMaps++;
        }
      } else if (attempt.getTaskType().equals(TaskType.REDUCE)) {
        if (reduceStarted == 0 || reduceStarted > startTime) {
          reduceStarted = startTime;
        }
        if (reduceFinished < finishTime) {
          reduceFinished = finishTime;
        }
        totalReduces++;
        if (attempt.getTaskStatus().equals(
            TaskStatus.State.FAILED.toString())) {
          numFailedReduces++;
        } else if (attempt.getTaskStatus().equals(
            TaskStatus.State.KILLED.toString())) {
          numKilledReduces++;
        }
      } else if (attempt.getTaskType().equals(TaskType.JOB_CLEANUP)) {
        if (cleanupStarted == 0 || cleanupStarted > startTime) {
          cleanupStarted = startTime;
        }
        if (cleanupFinished < finishTime) {
          cleanupFinished = finishTime;
        }
        totalCleanups++;
        if (attempt.getTaskStatus().equals(
            TaskStatus.State.SUCCEEDED.toString())) {
          numFinishedCleanups++;
        } else if (attempt.getTaskStatus().equals(
            TaskStatus.State.FAILED.toString())) {
          numFailedCleanups++;
        } else if (attempt.getTaskStatus().equals(
            TaskStatus.State.KILLED.toString())) {
          numKilledCleanups++;
        }
      } else if (attempt.getTaskType().equals(TaskType.JOB_SETUP)) {
        if (setupStarted == 0 || setupStarted > startTime) {
          setupStarted = startTime;
        }
        if (setupFinished < finishTime) {
          setupFinished = finishTime;
        }
        totalSetups++;
        if (attempt.getTaskStatus().equals(
            TaskStatus.State.SUCCEEDED.toString())) {
          numFinishedSetups++;
        } else if (attempt.getTaskStatus().equals(
            TaskStatus.State.FAILED.toString())) {
          numFailedSetups++;
        } else if (attempt.getTaskStatus().equals(
            TaskStatus.State.KILLED.toString())) {
          numKilledSetups++;
        }
      }
    }
  }
}
Example 5
Source File: HistoryViewer.java From hadoop with Apache License 2.0
/** Generate analysis information for the parsed job */
public AnalyzedJob(JobInfo job) {
  Map<TaskID, JobHistoryParser.TaskInfo> tasks = job.getAllTasks();
  int finishedMaps = (int) job.getFinishedMaps();
  int finishedReduces = (int) job.getFinishedReduces();
  mapTasks = new JobHistoryParser.TaskAttemptInfo[finishedMaps];
  reduceTasks = new JobHistoryParser.TaskAttemptInfo[finishedReduces];
  int mapIndex = 0, reduceIndex = 0;
  avgMapTime = 0;
  avgReduceTime = 0;
  avgShuffleTime = 0;

  for (JobHistoryParser.TaskInfo task : tasks.values()) {
    Map<TaskAttemptID, JobHistoryParser.TaskAttemptInfo> attempts =
      task.getAllTaskAttempts();
    for (JobHistoryParser.TaskAttemptInfo attempt : attempts.values()) {
      if (attempt.getTaskStatus().equals(
          TaskStatus.State.SUCCEEDED.toString())) {
        long avgFinishTime = (attempt.getFinishTime() -
            attempt.getStartTime());
        if (attempt.getTaskType().equals(TaskType.MAP)) {
          mapTasks[mapIndex++] = attempt;
          avgMapTime += avgFinishTime;
        } else if (attempt.getTaskType().equals(TaskType.REDUCE)) {
          reduceTasks[reduceIndex++] = attempt;
          avgShuffleTime += (attempt.getShuffleFinishTime() -
              attempt.getStartTime());
          avgReduceTime += (attempt.getFinishTime() -
              attempt.getShuffleFinishTime());
        }
        break;
      }
    }
  }
  if (finishedMaps > 0) {
    avgMapTime /= finishedMaps;
  }
  if (finishedReduces > 0) {
    avgReduceTime /= finishedReduces;
    avgShuffleTime /= finishedReduces;
  }
}
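The AnalyzedJob pattern above (collect successful attempts, then average their phase times) extends naturally to straggler hunting. A hypothetical helper, not part of HistoryViewer, that flattens getAllTasks() into a list of attempts sorted by runtime, slowest first:

import java.util.ArrayList;
import java.util.Comparator;
import java.util.List;

import org.apache.hadoop.mapreduce.jobhistory.JobHistoryParser.JobInfo;
import org.apache.hadoop.mapreduce.jobhistory.JobHistoryParser.TaskAttemptInfo;
import org.apache.hadoop.mapreduce.jobhistory.JobHistoryParser.TaskInfo;

class StragglerFinder {
  /** Return every attempt of a parsed job, slowest first. */
  static List<TaskAttemptInfo> attemptsByRuntime(JobInfo job) {
    List<TaskAttemptInfo> attempts = new ArrayList<>();
    for (TaskInfo task : job.getAllTasks().values()) {
      attempts.addAll(task.getAllTaskAttempts().values());
    }
    attempts.sort(Comparator
        .comparingLong((TaskAttemptInfo a) ->
            a.getFinishTime() - a.getStartTime())
        .reversed());
    return attempts;
  }
}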