org.apache.hadoop.mapred.JobHistory.Keys Java Examples
The following examples show how to use
org.apache.hadoop.mapred.JobHistory.Keys.
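For orientation: JobHistory.Keys is an enum whose constants name the fields of a job-history record, with values always carried as strings alongside it (Map<JobHistory.Keys, String>). The sketch below is reconstructed only from the constants that appear in the examples on this page; the actual enum in a given Hadoop release may define additional entries.

// Partial sketch, reconstructed from the examples below; not the complete
// enum as shipped in any particular Hadoop release.
public enum Keys {
    JOBTRACKERID, JOBID, JOBNAME, JOBCONF, USER, JOB_STATUS, JOB_PRIORITY,
    SUBMIT_TIME, LAUNCH_TIME, START_TIME, FINISH_TIME,
    TOTAL_MAPS, TOTAL_REDUCES, FAILED_MAPS, FAILED_REDUCES,
    FINISHED_MAPS, FINISHED_REDUCES,
    TASKID, TASK_TYPE, TASK_STATUS, TASK_ATTEMPT_ID, STATE_STRING,
    HOSTNAME, TRACKER_NAME, HTTP_PORT, ERROR, SPLITS, COUNTERS
}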
Example #1
Source File: HadoopJobHistoryLoader.java From spork with Apache License 2.0
private static Map<JobHistory.Keys, String> getLastSuccessfulTaskAttempt(
        JobHistory.Task task) {
    Map<String, JobHistory.TaskAttempt> taskAttempts = task.getTaskAttempts();
    int size = taskAttempts.size();
    Iterator<Map.Entry<String, JobHistory.TaskAttempt>> kv =
            taskAttempts.entrySet().iterator();
    for (int i = 0; i < size; i++) {
        // CHECK_IT: Only one SUCCESSFUL TASK ATTEMPT
        Map.Entry<String, JobHistory.TaskAttempt> tae = kv.next();
        JobHistory.TaskAttempt attempt = tae.getValue();
        if (null != attempt && null != attempt.getValues()
                && attempt.getValues().containsKey(JobHistory.Keys.TASK_STATUS)
                && attempt.getValues().get(JobHistory.Keys.TASK_STATUS)
                        .equals("SUCCESS")) {
            return attempt.getValues();
        }
    }
    return null;
}
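A minimal usage sketch for the helper above, assuming a JobHistory.Task has already been parsed from a history file (the task variable and the key looked up are illustrative):

// Hypothetical caller: read the finish time of the last successful attempt.
Map<JobHistory.Keys, String> values = getLastSuccessfulTaskAttempt(task);
if (values != null) {
    String finishTime = values.get(JobHistory.Keys.FINISH_TIME);
    // finishTime is a string-encoded timestamp in milliseconds
}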
Example #2
Source File: CoronaJobHistory.java From RDFS with Apache License 2.0
/**
 * Log a number of keys and values with the record. This method allows
 * doing it synchronously.
 * @param writers the writers to send the data to
 * @param recordType the type to log
 * @param keys keys to log
 * @param values values to log
 * @param sync if true, will block until the data is written
 */
private void log(ArrayList<PrintWriter> writers, RecordTypes recordType,
                 Keys[] keys, String[] values, boolean sync) {
    StringBuffer buf = new StringBuffer(recordType.name());
    buf.append(JobHistory.DELIMITER);
    for (int i = 0; i < keys.length; i++) {
        buf.append(keys[i]);
        buf.append("=\"");
        values[i] = JobHistory.escapeString(values[i]);
        buf.append(values[i]);
        buf.append("\"");
        buf.append(JobHistory.DELIMITER);
    }
    buf.append(JobHistory.LINE_DELIMITER_CHAR);

    for (PrintWriter out : writers) {
        LogTask task = new LogTask(out, buf.toString());
        if (sync) {
            task.run();
        } else {
            fileManager.addWriteTask(task);
        }
    }
}
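For reference, each call to log() emits one history record. Assuming JobHistory.DELIMITER is a single space and LINE_DELIMITER_CHAR is a period, as in the classic JobHistory text format, a Task record written with Keys.TASKID and Keys.TASK_STATUS would come out roughly as (task id illustrative):

Task TASKID="task_201005010101_0001_m_000001" TASK_STATUS="SUCCESS" .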
Example #3
Source File: JobTracker.java From hadoop-gpu with Apache License 2.0
/**
 * Adds a task attempt to the listener.
 */
private void processTaskAttempt(String taskAttemptId,
                                JobHistory.TaskAttempt attempt) {
    TaskAttemptID id = TaskAttemptID.forName(taskAttemptId);

    // Check if the transaction for this attempt can be committed
    String taskStatus = attempt.get(Keys.TASK_STATUS);

    if (taskStatus.length() > 0) {
        // This means this is an update event
        if (taskStatus.equals(Values.SUCCESS.name())) {
            // Mark this attempt as hanging
            hangingAttempts.put(id.getTaskID().toString(), taskAttemptId);
            addSuccessfulAttempt(jip, id, attempt);
        } else {
            addUnsuccessfulAttempt(jip, id, attempt);
            numEventsRecovered += 2;
        }
    } else {
        createTaskAttempt(jip, id, attempt);
    }
}
Example #4
Source File: JobTracker.java From hadoop-gpu with Apache License 2.0
private JobStatusChangeEvent updateJob(JobInProgress jip,
                                       JobHistory.JobInfo job) {
    // Change the job priority
    String jobpriority = job.get(Keys.JOB_PRIORITY);
    JobPriority priority = JobPriority.valueOf(jobpriority);
    // It's important to update this via the jobtracker's api, as it will
    // take care of updating the event listeners too
    setJobPriority(jip.getJobID(), priority);

    // Save the previous job status
    JobStatus oldStatus = (JobStatus) jip.getStatus().clone();

    // Set the start/launch time only if there are recovered tasks
    // Increment the job's restart count
    jip.updateJobInfo(job.getLong(JobHistory.Keys.SUBMIT_TIME),
                      job.getLong(JobHistory.Keys.LAUNCH_TIME));

    // Save the new job status
    JobStatus newStatus = (JobStatus) jip.getStatus().clone();

    return new JobStatusChangeEvent(jip, EventType.START_TIME_CHANGED,
                                    oldStatus, newStatus);
}
Example #5
Source File: JobTracker.java From hadoop-gpu with Apache License 2.0
private void updateTip(TaskInProgress tip, JobHistory.Task task) {
    long startTime = task.getLong(Keys.START_TIME);
    if (startTime != 0) {
        tip.setExecStartTime(startTime);
    }

    long finishTime = task.getLong(Keys.FINISH_TIME);
    // For failed tasks the finish time will be missing
    if (finishTime != 0) {
        tip.setExecFinishTime(finishTime);
    }

    String cause = task.get(Keys.TASK_ATTEMPT_ID);
    if (cause.length() > 0) {
        // This means that this is a FAILED event
        TaskAttemptID id = TaskAttemptID.forName(cause);
        TaskStatus status = tip.getTaskStatus(id);
        synchronized (JobTracker.this) {
            // This will add the tip-failed event to the new log
            tip.getJob().failedTask(tip, id, status.getDiagnosticInfo(),
                                    status.getPhase(), status.getRunState(),
                                    status.getTaskTracker());
        }
    }
}
Example #6
Source File: CoronaJobHistory.java From RDFS with Apache License 2.0
/**
 * Update the finish time of task.
 * @param taskId task id
 * @param finishTime finish time of task in ms
 */
public void logTaskUpdates(TaskID taskId, long finishTime) {
    if (disableHistory) {
        return;
    }

    JobID id = taskId.getJobID();
    if (!this.jobId.equals(id)) {
        throw new RuntimeException("JobId from task: " + id +
            " does not match expected: " + jobId);
    }

    if (null != writers) {
        log(writers, RecordTypes.Task,
            new Keys[]{Keys.TASKID, Keys.FINISH_TIME},
            new String[]{taskId.toString(), String.valueOf(finishTime)});
    }
}
Example #7
Source File: CoronaJobHistory.java From RDFS with Apache License 2.0
/**
 * Log finish time of task.
 * @param taskId task id
 * @param taskType MAP or REDUCE
 * @param finishTime finish time of task in ms
 * @param counters counters of the finished task
 */
public void logTaskFinished(TaskID taskId, String taskType,
                            long finishTime, Counters counters) {
    if (disableHistory) {
        return;
    }

    JobID id = taskId.getJobID();
    if (!this.jobId.equals(id)) {
        throw new RuntimeException("JobId from task: " + id +
            " does not match expected: " + jobId);
    }

    if (null != writers) {
        log(writers, RecordTypes.Task,
            new Keys[]{Keys.TASKID, Keys.TASK_TYPE, Keys.TASK_STATUS,
                       Keys.FINISH_TIME, Keys.COUNTERS},
            new String[]{taskId.toString(), taskType,
                         Values.SUCCESS.name(), String.valueOf(finishTime),
                         counters.makeEscapedCompactString()});
    }
}
Example #8
Source File: CoronaJobHistory.java From RDFS with Apache License 2.0
/**
 * Log start time of task (TIP).
 * @param taskId task id
 * @param taskType MAP or REDUCE
 * @param startTime start time of the tip
 * @param splitLocations split locations of the task
 */
public void logTaskStarted(TaskID taskId, String taskType,
                           long startTime, String splitLocations) {
    if (disableHistory) {
        return;
    }

    JobID id = taskId.getJobID();
    if (!this.jobId.equals(id)) {
        throw new RuntimeException("JobId from task: " + id +
            " does not match expected: " + jobId);
    }

    if (null != writers) {
        log(writers, RecordTypes.Task,
            new Keys[]{Keys.TASKID, Keys.TASK_TYPE,
                       Keys.START_TIME, Keys.SPLITS},
            new String[]{taskId.toString(), taskType,
                         String.valueOf(startTime), splitLocations});
    }
}
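Examples #6-#8 bracket a task's lifetime in the history log. A hedged sketch of the expected calling order for one map task (the helper and its arguments are illustrative; per-attempt events are logged by separate methods not shown here):

// Hypothetical helper showing the call order for one map task (TIP).
void recordMapTask(CoronaJobHistory history, TaskID taskId,
                   long startTimeMs, long finishTimeMs,
                   String splitLocations, Counters counters) {
    history.logTaskStarted(taskId, "MAP", startTimeMs, splitLocations);
    // ... attempt-level start/finish events would be logged in between ...
    history.logTaskFinished(taskId, "MAP", finishTimeMs, counters);
}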
Example #9
Source File: CoronaJobHistory.java From RDFS with Apache License 2.0
/**
 * Logs job killed event. Closes the job history log file.
 *
 * @param timestamp time when the job kill was issued, in ms
 * @param finishedMaps no. of finished map tasks
 * @param finishedReduces no. of finished reduce tasks
 * @param counters job counters
 */
public void logKilled(long timestamp, int finishedMaps, int finishedReduces,
                      Counters counters) {
    if (disableHistory) {
        return;
    }

    if (null != writers) {
        log(writers, RecordTypes.Job,
            new Keys[] {Keys.JOBID, Keys.FINISH_TIME, Keys.JOB_STATUS,
                        Keys.FINISHED_MAPS, Keys.FINISHED_REDUCES,
                        Keys.COUNTERS},
            new String[] {jobId.toString(), String.valueOf(timestamp),
                          Values.KILLED.name(), String.valueOf(finishedMaps),
                          String.valueOf(finishedReduces),
                          counters.makeEscapedCompactString()},
            true);
        closeAndClear(writers);
    }
}
Example #10
Source File: CoronaJobHistory.java From RDFS with Apache License 2.0
/**
 * Logs job failed event. Closes the job history log file.
 * @param timestamp time when the job failure was detected, in ms
 * @param finishedMaps no. of finished map tasks
 * @param finishedReduces no. of finished reduce tasks
 * @param counters job counters
 */
public void logFailed(long timestamp, int finishedMaps, int finishedReduces,
                      Counters counters) {
    if (disableHistory) {
        return;
    }

    if (null != writers) {
        log(writers, RecordTypes.Job,
            new Keys[] {Keys.JOBID, Keys.FINISH_TIME, Keys.JOB_STATUS,
                        Keys.FINISHED_MAPS, Keys.FINISHED_REDUCES,
                        Keys.COUNTERS},
            new String[] {jobId.toString(), String.valueOf(timestamp),
                          Values.FAILED.name(), String.valueOf(finishedMaps),
                          String.valueOf(finishedReduces),
                          counters.makeEscapedCompactString()},
            true);
        closeAndClear(writers);
    }
}
Example #11
Source File: CoronaJobHistory.java From RDFS with Apache License 2.0
/**
 * Logs launch time of job.
 *
 * @param startTime start time of job.
 * @param totalMaps total maps assigned by jobtracker.
 * @param totalReduces total reduces.
 */
public void logInited(long startTime, int totalMaps, int totalReduces) {
    if (disableHistory) {
        return;
    }

    if (null != writers) {
        log(writers, RecordTypes.Job,
            new Keys[] {Keys.JOBID, Keys.LAUNCH_TIME, Keys.TOTAL_MAPS,
                        Keys.TOTAL_REDUCES, Keys.JOB_STATUS},
            new String[] {jobId.toString(), String.valueOf(startTime),
                          String.valueOf(totalMaps),
                          String.valueOf(totalReduces),
                          Values.PREP.name()});
    }
}
Example #12
Source File: JobStatistics.java From RDFS with Apache License 2.0
private java.util.Map<JobHistory.Keys, String> getLastSuccessfulTaskAttempt(
        JobHistory.Task task) {
    Map<String, JobHistory.TaskAttempt> taskAttempts = task.getTaskAttempts();
    int size = taskAttempts.size();
    java.util.Iterator<Map.Entry<String, JobHistory.TaskAttempt>> kv =
            taskAttempts.entrySet().iterator();
    for (int i = 0; i < size; i++) {
        // CHECK_IT: Only one SUCCESSFUL TASK ATTEMPT
        Map.Entry<String, JobHistory.TaskAttempt> tae = kv.next();
        JobHistory.TaskAttempt attempt = tae.getValue();
        if (attempt.getValues().get(JobHistory.Keys.TASK_STATUS)
                .equals("SUCCESS")) {
            return attempt.getValues();
        }
    }
    return null;
}
Example #13
Source File: DefaultJobHistoryParser.java From RDFS with Apache License 2.0
public void handle(JobHistory.RecordTypes recType, Map<Keys, String> values)
        throws IOException {
    if (recType.equals(JobHistory.RecordTypes.MapAttempt)
            || recType.equals(JobHistory.RecordTypes.ReduceAttempt)) {
        if (failureType.equals(values.get(Keys.TASK_STATUS))) {
            String hostName = values.get(Keys.HOSTNAME);
            String taskid = values.get(Keys.TASKID);
            Set<String> tasks = badNodesToNumFailedTasks.get(hostName);
            if (null == tasks) {
                tasks = new TreeSet<String>();
                tasks.add(taskid);
                badNodesToNumFailedTasks.put(hostName, tasks);
            } else {
                tasks.add(taskid);
            }
        }
    }
}
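The handle method above is the JobHistory.Listener callback; a parser invokes it once per record while scanning a history file. A hedged sketch of driving such a listener (parseHistoryFromFS is the classic entry point of the old mapred JobHistory API; verify the exact signature in your Hadoop version):

// Hedged sketch: feed a history file through a listener like the one above.
void scanHistory(Configuration conf, String historyFilePath,
                 JobHistory.Listener listener) throws IOException {
    FileSystem fs = FileSystem.get(conf);
    JobHistory.parseHistoryFromFS(historyFilePath, listener, fs);
}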
Example #14
Source File: DefaultJobHistoryParser.java From hadoop-gpu with Apache License 2.0
public void handle(JobHistory.RecordTypes recType, Map<Keys, String> values)
        throws IOException {
    if (recType.equals(JobHistory.RecordTypes.MapAttempt)
            || recType.equals(JobHistory.RecordTypes.ReduceAttempt)) {
        if (failureType.equals(values.get(Keys.TASK_STATUS))) {
            String hostName = values.get(Keys.HOSTNAME);
            String taskid = values.get(Keys.TASKID);
            Set<String> tasks = badNodesToNumFailedTasks.get(hostName);
            if (null == tasks) {
                tasks = new TreeSet<String>();
                tasks.add(taskid);
                badNodesToNumFailedTasks.put(hostName, tasks);
            } else {
                tasks.add(taskid);
            }
        }
    }
}
Example #15
Source File: JobStatistics.java From hadoop-gpu with Apache License 2.0
private java.util.Map<JobHistory.Keys, String> getLastSuccessfulTaskAttempt(
        JobHistory.Task task) {
    Map<String, JobHistory.TaskAttempt> taskAttempts = task.getTaskAttempts();
    int size = taskAttempts.size();
    java.util.Iterator<Map.Entry<String, JobHistory.TaskAttempt>> kv =
            taskAttempts.entrySet().iterator();
    for (int i = 0; i < size; i++) {
        // CHECK_IT: Only one SUCCESSFUL TASK ATTEMPT
        Map.Entry<String, JobHistory.TaskAttempt> tae = kv.next();
        JobHistory.TaskAttempt attempt = tae.getValue();
        if (attempt.getValues().get(JobHistory.Keys.TASK_STATUS)
                .equals("SUCCESS")) {
            return attempt.getValues();
        }
    }
    return null;
}
Example #16
Source File: DefaultJobHistoryParser.java From hadoop-gpu with Apache License 2.0
private JobHistory.Task getTask(String taskId) {
    JobHistory.Task task = job.getAllTasks().get(taskId);
    if (null == task) {
        task = new JobHistory.Task();
        task.set(Keys.TASKID, taskId);
        job.getAllTasks().put(taskId, task);
    }
    return task;
}
Example #17
Source File: DefaultJobHistoryParser.java From hadoop-gpu with Apache License 2.0
private JobHistory.MapAttempt getMapAttempt(String jobid, String jobTrackerId,
                                            String taskId,
                                            String taskAttemptId) {
    JobHistory.Task task = getTask(taskId);
    JobHistory.MapAttempt mapAttempt =
        (JobHistory.MapAttempt) task.getTaskAttempts().get(taskAttemptId);
    if (null == mapAttempt) {
        mapAttempt = new JobHistory.MapAttempt();
        mapAttempt.set(Keys.TASK_ATTEMPT_ID, taskAttemptId);
        task.getTaskAttempts().put(taskAttemptId, mapAttempt);
    }
    return mapAttempt;
}
Example #18
Source File: JobTracker.java From hadoop-gpu with Apache License 2.0
private void addUnsuccessfulAttempt(JobInProgress job,
                                    TaskAttemptID attemptId,
                                    JobHistory.TaskAttempt attempt) {
    // I. Get the required info
    TaskID taskId = attemptId.getTaskID();
    TaskInProgress tip = job.getTaskInProgress(taskId);
    long attemptFinishTime = attempt.getLong(Keys.FINISH_TIME);

    TaskStatus taskStatus = (TaskStatus) tip.getTaskStatus(attemptId).clone();
    taskStatus.setFinishTime(attemptFinishTime);

    // Reset the progress
    taskStatus.setProgress(0.0f);

    String stateString = attempt.get(Keys.STATE_STRING);
    taskStatus.setStateString(stateString);

    boolean hasFailed =
        attempt.get(Keys.TASK_STATUS).equals(Values.FAILED.name());
    // Set the state failed/killed
    if (hasFailed) {
        taskStatus.setRunState(TaskStatus.State.FAILED);
    } else {
        taskStatus.setRunState(TaskStatus.State.KILLED);
    }

    // Get/Set the error message
    String diagInfo = attempt.get(Keys.ERROR);
    taskStatus.setDiagnosticInfo(diagInfo); // diag info

    synchronized (JobTracker.this) {
        // II. Update the task status
        job.updateTaskStatus(tip, taskStatus);
    }

    // III. Prevent the task from expiring
    expireLaunchingTasks.removeTask(attemptId);
}
Example #19
Source File: DefaultJobHistoryParser.java From hadoop-gpu with Apache License 2.0
private JobHistory.ReduceAttempt getReduceAttempt(String jobid,
                                                  String jobTrackerId,
                                                  String taskId,
                                                  String taskAttemptId) {
    JobHistory.Task task = getTask(taskId);
    JobHistory.ReduceAttempt reduceAttempt =
        (JobHistory.ReduceAttempt) task.getTaskAttempts().get(taskAttemptId);
    if (null == reduceAttempt) {
        reduceAttempt = new JobHistory.ReduceAttempt();
        reduceAttempt.set(Keys.TASK_ATTEMPT_ID, taskAttemptId);
        task.getTaskAttempts().put(taskAttemptId, reduceAttempt);
    }
    return reduceAttempt;
}
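Examples #16, #17, and #19 share one get-or-create pattern: look the object up in its registry, and if it is absent, construct it, stamp its identifying key, and register it. A generic restatement of the idea (hypothetical helper, not part of the Hadoop API; it uses java.util.function.Supplier and so assumes Java 8+, unlike the original code):

// Hypothetical generic form of the get-or-create pattern used above.
static <V> V getOrCreate(Map<String, V> registry, String id,
                         java.util.function.Supplier<V> factory) {
    V value = registry.get(id);
    if (value == null) {
        value = factory.get();
        registry.put(id, value);
    }
    return value;
}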
Example #20
Source File: JobTracker.java From hadoop-gpu with Apache License 2.0
private void checkAndInit() throws IOException {
    String jobStatus = this.job.get(Keys.JOB_STATUS);
    if (Values.PREP.name().equals(jobStatus)) {
        hasUpdates = true;
        LOG.info("Calling init from RM for job " + jip.getJobID().toString());
        try {
            initJob(jip);
        } catch (Throwable t) {
            LOG.error("Job initialization failed : \n"
                + StringUtils.stringifyException(t));
            failJob(jip);
            throw new IOException(t);
        }
    }
}
Example #21
Source File: HadoopJobHistoryLoader.java From spork with Apache License 2.0
private static void populateJob(Map<JobHistory.Keys, String> jobC,
                                Map<String, String> job) {
    int size = jobC.size();
    Iterator<Map.Entry<JobHistory.Keys, String>> kv =
            jobC.entrySet().iterator();
    for (int i = 0; i < size; i++) {
        Map.Entry<JobHistory.Keys, String> entry = kv.next();
        JobHistory.Keys key = entry.getKey();
        String value = entry.getValue();
        switch (key) {
        case JOBTRACKERID: job.put(JobKeys.JOBTRACKERID.toString(), value); break;
        case FINISH_TIME: job.put(JobKeys.FINISH_TIME.toString(), value); break;
        case JOBID: job.put(JobKeys.JOBID.toString(), value); break;
        case JOBNAME: job.put(JobKeys.JOBNAME.toString(), value); break;
        case USER: job.put(JobKeys.USER.toString(), value); break;
        case JOBCONF: job.put(JobKeys.JOBCONF.toString(), value); break;
        case SUBMIT_TIME: job.put(JobKeys.SUBMIT_TIME.toString(), value); break;
        case LAUNCH_TIME: job.put(JobKeys.LAUNCH_TIME.toString(), value); break;
        case TOTAL_MAPS: job.put(JobKeys.TOTAL_MAPS.toString(), value); break;
        case TOTAL_REDUCES: job.put(JobKeys.TOTAL_REDUCES.toString(), value); break;
        case FAILED_MAPS: job.put(JobKeys.FAILED_MAPS.toString(), value); break;
        case FAILED_REDUCES: job.put(JobKeys.FAILED_REDUCES.toString(), value); break;
        case FINISHED_MAPS: job.put(JobKeys.FINISHED_MAPS.toString(), value); break;
        case FINISHED_REDUCES: job.put(JobKeys.FINISHED_REDUCES.toString(), value); break;
        case JOB_STATUS: job.put(JobKeys.STATUS.toString(), value); break;
        case COUNTERS:
            // Note: String.concat() returns a new string, so this call has
            // no effect on value as written.
            value.concat(",");
            parseAndAddJobCounters(job, value);
            break;
        default:
            LOG.debug("JobHistory.Keys." + key
                + " : NOT INCLUDED IN LOADER RETURN VALUE");
            break;
        }
    }
}
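A hedged usage sketch for the loader method above: it rewrites a history-keyed map into a plain string-keyed map under the loader's own JobKeys names (the input values below are illustrative):

// Illustrative call: convert parsed job-level history values.
Map<JobHistory.Keys, String> parsed = new HashMap<JobHistory.Keys, String>();
parsed.put(JobHistory.Keys.JOBID, "job_201005010101_0001");
parsed.put(JobHistory.Keys.JOB_STATUS, "SUCCESS");
Map<String, String> job = new HashMap<String, String>();
populateJob(parsed, job);
// job now holds entries under JobKeys.JOBID.toString()
// and JobKeys.STATUS.toString()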
Example #22
Source File: JobStatistics.java From hadoop-gpu with Apache License 2.0
private void populate_Job(Hashtable<Enum, String> job,
                          java.util.Map<JobHistory.Keys, String> jobC)
        throws ParseException {
    int size = jobC.size();
    java.util.Iterator<Map.Entry<JobHistory.Keys, String>> kv =
            jobC.entrySet().iterator();
    for (int i = 0; i < size; i++) {
        Map.Entry<JobHistory.Keys, String> entry = kv.next();
        JobHistory.Keys key = entry.getKey();
        String value = entry.getValue();
        switch (key) {
        case JOBTRACKERID: job.put(JobKeys.JOBTRACKERID, value); break;
        //case START_TIME: job.put(JobKeys., value); break;
        case FINISH_TIME: job.put(JobKeys.FINISH_TIME, value); break;
        case JOBID: job.put(JobKeys.JOBID, value); break;
        case JOBNAME: job.put(JobKeys.JOBNAME, value); break;
        case USER: job.put(JobKeys.USER, value); break;
        case JOBCONF: job.put(JobKeys.JOBCONF, value); break;
        case SUBMIT_TIME: job.put(JobKeys.SUBMIT_TIME, value); break;
        case LAUNCH_TIME: job.put(JobKeys.LAUNCH_TIME, value); break;
        case TOTAL_MAPS: job.put(JobKeys.TOTAL_MAPS, value); break;
        case TOTAL_REDUCES: job.put(JobKeys.TOTAL_REDUCES, value); break;
        case FAILED_MAPS: job.put(JobKeys.FAILED_MAPS, value); break;
        case FAILED_REDUCES: job.put(JobKeys.FAILED_REDUCES, value); break;
        case FINISHED_MAPS: job.put(JobKeys.FINISHED_MAPS, value); break;
        case FINISHED_REDUCES: job.put(JobKeys.FINISHED_REDUCES, value); break;
        case JOB_STATUS: job.put(JobKeys.STATUS, value); break;
        case COUNTERS:
            value.concat(",");
            parseAndAddJobCounters(job, value);
            break;
        default:
            System.out.println("JobHistory.Keys." + key
                + " : NOT INCLUDED IN PERFORMANCE ADVISOR COUNTERS");
            break;
        }
    }
}
Example #23
Source File: CoronaJobHistory.java From RDFS with Apache License 2.0
/**
 * Log killed reduce task attempt.
 *
 * @param taskAttemptId task attempt id
 * @param timestamp time stamp when the task was killed
 * @param hostName host name of the task attempt.
 * @param error error message of the task.
 * @param taskType Whether the attempt is cleanup or setup or reduce
 */
public void logReduceTaskKilled(TaskAttemptID taskAttemptId, long timestamp,
                                String hostName, String error,
                                String taskType) {
    if (disableHistory) {
        return;
    }

    JobID id = taskAttemptId.getJobID();
    if (!this.jobId.equals(id)) {
        throw new RuntimeException("JobId from task: " + id +
            " does not match expected: " + jobId);
    }

    if (null != writers) {
        log(writers, RecordTypes.ReduceAttempt,
            new Keys[]{Keys.TASK_TYPE, Keys.TASKID, Keys.TASK_ATTEMPT_ID,
                       Keys.TASK_STATUS, Keys.FINISH_TIME, Keys.HOSTNAME,
                       Keys.ERROR},
            new String[]{taskType, taskAttemptId.getTaskID().toString(),
                         taskAttemptId.toString(), Values.KILLED.name(),
                         String.valueOf(timestamp), hostName, error});
    }
}
Example #24
Source File: CoronaJobHistory.java From RDFS with Apache License 2.0
/**
 * Log failed reduce task attempt.
 *
 * @param taskAttemptId task attempt id
 * @param timestamp time stamp when task failed
 * @param hostName host name of the task attempt.
 * @param error error message of the task.
 * @param taskType Whether the attempt is cleanup or setup or reduce
 */
public void logReduceTaskFailed(TaskAttemptID taskAttemptId, long timestamp,
                                String hostName, String error,
                                String taskType) {
    if (disableHistory) {
        return;
    }

    JobID id = taskAttemptId.getJobID();
    if (!this.jobId.equals(id)) {
        throw new RuntimeException("JobId from task: " + id +
            " does not match expected: " + jobId);
    }

    if (null != writers) {
        log(writers, RecordTypes.ReduceAttempt,
            new Keys[]{Keys.TASK_TYPE, Keys.TASKID, Keys.TASK_ATTEMPT_ID,
                       Keys.TASK_STATUS, Keys.FINISH_TIME, Keys.HOSTNAME,
                       Keys.ERROR},
            new String[]{taskType, taskAttemptId.getTaskID().toString(),
                         taskAttemptId.toString(), Values.FAILED.name(),
                         String.valueOf(timestamp), hostName, error});
    }
}
Example #25
Source File: CoronaJobHistory.java From RDFS with Apache License 2.0
/**
 * Log start time of Reduce task attempt.
 *
 * @param taskAttemptId task attempt id
 * @param startTime start time
 * @param trackerName tracker name
 * @param httpPort the http port of the tracker executing the task attempt
 * @param taskType Whether the attempt is cleanup or setup or reduce
 */
public void logReduceTaskStarted(TaskAttemptID taskAttemptId, long startTime,
                                 String trackerName, int httpPort,
                                 String taskType) {
    if (disableHistory) {
        return;
    }

    JobID id = taskAttemptId.getJobID();
    if (!this.jobId.equals(id)) {
        throw new RuntimeException("JobId from task: " + id +
            " does not match expected: " + jobId);
    }

    if (null != writers) {
        log(writers, RecordTypes.ReduceAttempt,
            new Keys[]{Keys.TASK_TYPE, Keys.TASKID, Keys.TASK_ATTEMPT_ID,
                       Keys.START_TIME, Keys.TRACKER_NAME, Keys.HTTP_PORT},
            new String[]{taskType, taskAttemptId.getTaskID().toString(),
                         taskAttemptId.toString(), String.valueOf(startTime),
                         trackerName,
                         httpPort == -1 ? "" : String.valueOf(httpPort)});
    }
}
Example #26
Source File: DefaultJobHistoryParser.java From RDFS with Apache License 2.0
private JobHistory.Task getTask(String taskId) {
    JobHistory.Task task = job.getAllTasks().get(taskId);
    if (null == task) {
        task = new JobHistory.Task();
        task.set(Keys.TASKID, taskId);
        job.getAllTasks().put(taskId, task);
    }
    return task;
}
Example #27
Source File: DefaultJobHistoryParser.java From RDFS with Apache License 2.0
private JobHistory.MapAttempt getMapAttempt(String jobid, String jobTrackerId,
                                            String taskId,
                                            String taskAttemptId) {
    JobHistory.Task task = getTask(taskId);
    JobHistory.MapAttempt mapAttempt =
        (JobHistory.MapAttempt) task.getTaskAttempts().get(taskAttemptId);
    if (null == mapAttempt) {
        mapAttempt = new JobHistory.MapAttempt();
        mapAttempt.set(Keys.TASK_ATTEMPT_ID, taskAttemptId);
        task.getTaskAttempts().put(taskAttemptId, mapAttempt);
    }
    return mapAttempt;
}
Example #28
Source File: DefaultJobHistoryParser.java From RDFS with Apache License 2.0
private JobHistory.ReduceAttempt getReduceAttempt(String jobid,
                                                  String jobTrackerId,
                                                  String taskId,
                                                  String taskAttemptId) {
    JobHistory.Task task = getTask(taskId);
    JobHistory.ReduceAttempt reduceAttempt =
        (JobHistory.ReduceAttempt) task.getTaskAttempts().get(taskAttemptId);
    if (null == reduceAttempt) {
        reduceAttempt = new JobHistory.ReduceAttempt();
        reduceAttempt.set(Keys.TASK_ATTEMPT_ID, taskAttemptId);
        task.getTaskAttempts().put(taskAttemptId, reduceAttempt);
    }
    return reduceAttempt;
}
Example #29
Source File: JobStatistics.java From RDFS with Apache License 2.0
private void populate_Job(Hashtable<Enum, String> job,
                          java.util.Map<JobHistory.Keys, String> jobC)
        throws ParseException {
    int size = jobC.size();
    java.util.Iterator<Map.Entry<JobHistory.Keys, String>> kv =
            jobC.entrySet().iterator();
    for (int i = 0; i < size; i++) {
        Map.Entry<JobHistory.Keys, String> entry = kv.next();
        JobHistory.Keys key = entry.getKey();
        String value = entry.getValue();
        //System.out.println("JobHistory.JobKeys."+key+": "+value);
        switch (key) {
        case JOBTRACKERID: job.put(JobKeys.JOBTRACKERID, value); break;
        case FINISH_TIME: job.put(JobKeys.FINISH_TIME, value); break;
        case JOBID: job.put(JobKeys.JOBID, value); break;
        case JOBNAME: job.put(JobKeys.JOBNAME, value); break;
        case USER: job.put(JobKeys.USER, value); break;
        case JOBCONF: job.put(JobKeys.JOBCONF, value); break;
        case SUBMIT_TIME: job.put(JobKeys.SUBMIT_TIME, value); break;
        case LAUNCH_TIME: job.put(JobKeys.LAUNCH_TIME, value); break;
        case TOTAL_MAPS: job.put(JobKeys.TOTAL_MAPS, value); break;
        case TOTAL_REDUCES: job.put(JobKeys.TOTAL_REDUCES, value); break;
        case FAILED_MAPS: job.put(JobKeys.FAILED_MAPS, value); break;
        case FAILED_REDUCES: job.put(JobKeys.FAILED_REDUCES, value); break;
        case FINISHED_MAPS: job.put(JobKeys.FINISHED_MAPS, value); break;
        case FINISHED_REDUCES: job.put(JobKeys.FINISHED_REDUCES, value); break;
        case JOB_STATUS: job.put(JobKeys.STATUS, value); break;
        case JOB_PRIORITY: job.put(JobKeys.JOB_PRIORITY, value); break;
        case COUNTERS:
            value.concat(",");
            parseAndAddJobCounters(job, value);
            break;
        default:
            System.err.println("JobHistory.Keys." + key
                + " : NOT INCLUDED IN PERFORMANCE ADVISOR COUNTERS");
            break;
        }
    }
}
Example #30
Source File: CoronaJobHistory.java From RDFS with Apache License 2.0
/**
 * Logs job as running.
 */
public void logStarted() {
    if (disableHistory) {
        return;
    }

    if (null != writers) {
        log(writers, RecordTypes.Job,
            new Keys[] {Keys.JOBID, Keys.JOB_STATUS},
            new String[] {jobId.toString(), Values.RUNNING.name()});
    }
}
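Taken together with Examples #9-#11, the job-level record sequence emerges. A hedged summary of the calling order, using only methods shown on this page (the history object and arguments are illustrative; a success-path counterpart to logFailed/logKilled presumably exists in the class but is not among these examples):

// Illustrative job-level sequence for a CoronaJobHistory instance.
history.logInited(launchTimeMs, totalMaps, totalReduces); // JOB_STATUS = PREP
history.logStarted();                                     // JOB_STATUS = RUNNING
// ... task and attempt records are written while the job runs ...
// On termination, exactly one closing event is logged, e.g.:
history.logKilled(timestampMs, finishedMaps, finishedReduces, counters);
// or: history.logFailed(timestampMs, finishedMaps, finishedReduces, counters);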