org.apache.hadoop.mapred.TaskReport Java Examples
The following examples show how to use org.apache.hadoop.mapred.TaskReport. They are drawn from the hadoop, big-c, and spork (Apache Pig) projects; each example notes its original source file and license.
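Before diving into the examples, here is a minimal sketch of where TaskReport instances usually come from: the old-API JobClient exposes per-job map and reduce task reports. The class below is illustrative only (it does not appear in any of the examples), and the job ID string is a placeholder.

import org.apache.hadoop.mapred.JobClient;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.JobID;
import org.apache.hadoop.mapred.TaskReport;

// Illustrative only: fetch and print the map-task reports of one job.
public class TaskReportDemo {
  public static void main(String[] args) throws Exception {
    JobClient client = new JobClient(new JobConf());
    JobID jobId = JobID.forName("job_201401010000_0001"); // placeholder ID
    for (TaskReport report : client.getMapTaskReports(jobId)) {
      System.out.println(report.getTaskID() + "\t" + report.getProgress()
          + "\t" + report.getState());
    }
  }
}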
Example #1
Source File: TestStreamingStatus.java From hadoop with Apache License 2.0 (the same method appears verbatim in big-c)
void validateTaskStatus(StreamJob job, TaskType type) throws IOException {
  // Map Task has 2 phases: map, sort
  // Reduce Task has 3 phases: copy, sort, reduce
  String finalPhaseInTask;
  TaskReport[] reports;
  if (type == TaskType.MAP) {
    reports = job.jc_.getMapTaskReports(job.jobId_);
    finalPhaseInTask = "sort";
  } else { // reduce task
    reports = job.jc_.getReduceTaskReports(job.jobId_);
    finalPhaseInTask = "reduce";
  }
  assertEquals(1, reports.length);
  assertEquals(expectedStatus + " > " + finalPhaseInTask, reports[0].getState());
}
Example #2
Source File: MRJobStats.java From spork with Apache License 2.0
private TaskStat getTaskStat(Iterator<TaskReport> tasks) {
    int size = 0;
    long max = 0;
    long min = Long.MAX_VALUE;
    long median = 0;
    long total = 0;
    List<Long> durations = new ArrayList<Long>();
    while (tasks.hasNext()) {
        TaskReport rpt = tasks.next();
        long duration = rpt.getFinishTime() - rpt.getStartTime();
        durations.add(duration);
        max = (duration > max) ? duration : max;
        min = (duration < min) ? duration : min;
        total += duration;
        size++;
    }
    // note: assumes at least one report; an empty iterator would make
    // this division throw an ArithmeticException
    long avg = total / size;
    median = calculateMedianValue(durations);
    return new TaskStat(size, max, min, avg, median);
}
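The calculateMedianValue helper is called above but not included in the excerpt. A plausible sketch of it (an assumption, not the verbatim Pig implementation) sorts the durations and returns the middle element:

// Hypothetical helper: median of task durations; assumes a non-empty list.
private static long calculateMedianValue(List<Long> durations) {
    Collections.sort(durations);
    int middle = durations.size() / 2;
    if (durations.size() % 2 == 0) {
        return (durations.get(middle - 1) + durations.get(middle)) / 2;
    }
    return durations.get(middle);
}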
Example #3
Source File: TestMRJobStats.java From spork with Apache License 2.0
@BeforeClass
public static void oneTimeSetup() throws Exception {
    // setting up TaskReport for map tasks
    for (int i = 0; i < mapTaskReports.length; i++) {
        mapTaskReports[i] = Mockito.mock(TaskReport.class);
        Mockito.when(mapTaskReports[i].getStartTime()).thenReturn(MAP_START_FINISH_TIME_DATA[i][0] * ONE_THOUSAND);
        Mockito.when(mapTaskReports[i].getFinishTime()).thenReturn(MAP_START_FINISH_TIME_DATA[i][1] * ONE_THOUSAND);
    }
    // setting up TaskReport for reduce tasks
    for (int i = 0; i < reduceTaskReports.length; i++) {
        reduceTaskReports[i] = Mockito.mock(TaskReport.class);
        Mockito.when(reduceTaskReports[i].getStartTime()).thenReturn(REDUCE_START_FINISH_TIME_DATA[i][0] * ONE_THOUSAND);
        Mockito.when(reduceTaskReports[i].getFinishTime()).thenReturn(REDUCE_START_FINISH_TIME_DATA[i][1] * ONE_THOUSAND);
    }
    StringBuilder sb = new StringBuilder();
    sb.append(jobID.toString()).append("\t");
    sb.append(mapTaskReports.length).append("\t");
    sb.append(reduceTaskReports.length).append("\t");
    sb.append("500\t100\t300\t300\t500\t100\t240\t200");
    ASSERT_STRING = sb.toString();
}
Example #4
Source File: HadoopShims.java From spork with Apache License 2.0
public static Iterator<TaskReport> getTaskReports(Job job, TaskType type) throws IOException {
    if (job.getJobConf().getBoolean(PigConfiguration.PIG_NO_TASK_REPORT, false)) {
        LOG.info("TaskReports are disabled for job: " + job.getAssignedJobID());
        return null;
    }
    Cluster cluster = new Cluster(job.getJobConf());
    try {
        org.apache.hadoop.mapreduce.Job mrJob = cluster.getJob(job.getAssignedJobID());
        if (mrJob == null) { // In local mode, mrJob will be null
            mrJob = job.getJob();
        }
        org.apache.hadoop.mapreduce.TaskReport[] reports = mrJob.getTaskReports(type);
        return DowngradeHelper.downgradeTaskReports(reports);
    } catch (InterruptedException ir) {
        throw new IOException(ir);
    }
}
Example #5
Source File: Launcher.java From spork with Apache License 2.0
protected long computeTimeSpent(Iterator<TaskReport> taskReports) {
    long timeSpent = 0;
    while (taskReports.hasNext()) {
        TaskReport r = taskReports.next();
        timeSpent += (r.getFinishTime() - r.getStartTime());
    }
    return timeSpent;
}
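Combined with the getTaskReports shim from Example #4, a caller could total the map-phase runtime of a job. The snippet below is a hedged sketch, not Pig source; it assumes it runs inside a Launcher subclass with a Job in scope:

// Sketch: sum map-task wall-clock time; getTaskReports returns null
// when task reports are disabled, so guard against that.
Iterator<TaskReport> mapReports = HadoopShims.getTaskReports(job, TaskType.MAP);
long mapTimeSpent = (mapReports == null) ? 0L : computeTimeSpent(mapReports);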
Example #6
Source File: TestMRJobStats.java From spork with Apache License 2.0
@Test
public void testOneTaskReport() throws Exception {
    // setting up one map task report
    TaskReport[] mapTaskReports = new TaskReport[1];
    mapTaskReports[0] = Mockito.mock(TaskReport.class);
    Mockito.when(mapTaskReports[0].getStartTime()).thenReturn(300L * ONE_THOUSAND);
    Mockito.when(mapTaskReports[0].getFinishTime()).thenReturn(400L * ONE_THOUSAND);
    // setting up one reduce task report
    TaskReport[] reduceTaskReports = new TaskReport[1];
    reduceTaskReports[0] = Mockito.mock(TaskReport.class);
    Mockito.when(reduceTaskReports[0].getStartTime()).thenReturn(500L * ONE_THOUSAND);
    Mockito.when(reduceTaskReports[0].getFinishTime()).thenReturn(700L * ONE_THOUSAND);

    PigStats.JobGraph jobGraph = new PigStats.JobGraph();
    MRJobStats jobStats = createJobStats("JobStatsTest", jobGraph);
    getJobStatsMethod("setId", JobID.class).invoke(jobStats, jobID);
    jobStats.setSuccessful(true);

    getJobStatsMethod("addMapReduceStatistics", Iterator.class, Iterator.class)
        .invoke(jobStats, Arrays.asList(mapTaskReports).iterator(),
                Arrays.asList(reduceTaskReports).iterator());
    String msg = (String) getJobStatsMethod("getDisplayString").invoke(jobStats);

    System.out.println(JobStats.SUCCESS_HEADER);
    System.out.println(msg);

    StringBuilder sb = new StringBuilder();
    sb.append(jobID.toString()).append("\t");
    sb.append(mapTaskReports.length).append("\t");
    sb.append(reduceTaskReports.length).append("\t");
    sb.append("100\t100\t100\t100\t200\t200\t200\t200");
    System.out.println("assert msg: " + sb.toString());
    assertTrue(msg.startsWith(sb.toString()));
}
Example #7
Source File: HadoopShims.java From spork with Apache License 2.0
public static boolean isJobFailed(TaskReport report) {
    float successfulProgress = 1.0f;
    // if the progress reported is not 1.0f then the map or reduce job failed
    // this comparison is in place for the backward compatibility for Hadoop 0.20
    return report.getProgress() != successfulProgress;
}
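Note that this progress-based check conflates "failed" with "not yet complete": any report whose progress is below 1.0f counts as failed, so it is only meaningful once the job has finished. Example #10 below shows the Hadoop 2 variant, which inspects the task status directly.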
Example #8
Source File: HadoopShims.java From spork with Apache License 2.0
public static Iterator<TaskReport> getTaskReports(Job job, TaskType type) throws IOException {
    if (job.getJobConf().getBoolean(PigConfiguration.PIG_NO_TASK_REPORT, false)) {
        LOG.info("TaskReports are disabled for job: " + job.getAssignedJobID());
        return null;
    }
    JobClient jobClient = job.getJobClient();
    TaskReport[] reports = null;
    if (type == TaskType.MAP) {
        reports = jobClient.getMapTaskReports(job.getAssignedJobID());
    } else {
        reports = jobClient.getReduceTaskReports(job.getAssignedJobID());
    }
    return reports == null ? null : Arrays.asList(reports).iterator();
}
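This is the Hadoop 1, JobClient-based counterpart of the shim in Example #4, which goes through the Hadoop 2 Cluster API and downgrades the newer org.apache.hadoop.mapreduce.TaskReport objects to the mapred type used here.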
Example #9
Source File: TestJobMonitorAndPrint.java From hadoop with Apache License 2.0 (the same test appears verbatim in big-c)
@Test
public void testJobMonitorAndPrint() throws Exception {
  JobStatus jobStatus_1 = new JobStatus(new JobID("job_000", 1), 1f, 0.1f,
      0.1f, 0f, State.RUNNING, JobPriority.HIGH, "tmp-user", "tmp-jobname",
      "tmp-queue", "tmp-jobfile", "tmp-url", true);
  JobStatus jobStatus_2 = new JobStatus(new JobID("job_000", 1), 1f, 1f,
      1f, 1f, State.SUCCEEDED, JobPriority.HIGH, "tmp-user", "tmp-jobname",
      "tmp-queue", "tmp-jobfile", "tmp-url", true);

  doAnswer(new Answer<TaskCompletionEvent[]>() {
    @Override
    public TaskCompletionEvent[] answer(InvocationOnMock invocation)
        throws Throwable {
      return new TaskCompletionEvent[0];
    }
  }).when(job).getTaskCompletionEvents(anyInt(), anyInt());

  doReturn(new TaskReport[5]).when(job).getTaskReports(isA(TaskType.class));
  when(clientProtocol.getJobStatus(any(JobID.class))).thenReturn(jobStatus_1, jobStatus_2);

  // setup the logger to capture all logs
  Layout layout = Logger.getRootLogger().getAppender("stdout").getLayout();
  ByteArrayOutputStream os = new ByteArrayOutputStream();
  WriterAppender appender = new WriterAppender(layout, os);
  appender.setThreshold(Level.ALL);
  Logger qlogger = Logger.getLogger(Job.class);
  qlogger.addAppender(appender);

  job.monitorAndPrintJob();

  qlogger.removeAppender(appender);
  LineNumberReader r = new LineNumberReader(new StringReader(os.toString()));
  String line;
  boolean foundHundred = false;
  boolean foundComplete = false;
  boolean foundUber = false;
  String uberModeMatch = "uber mode : true";
  String progressMatch = "map 100% reduce 100%";
  String completionMatch = "completed successfully";
  while ((line = r.readLine()) != null) {
    if (line.contains(uberModeMatch)) {
      foundUber = true;
    }
    foundHundred = line.contains(progressMatch);
    if (foundHundred) {
      break;
    }
  }
  line = r.readLine();
  foundComplete = line.contains(completionMatch);
  assertTrue(foundUber);
  assertTrue(foundHundred);
  assertTrue(foundComplete);

  System.out.println("The output of job.toString() is : \n" + job.toString());
  assertTrue(job.toString().contains("Number of maps: 5\n"));
  assertTrue(job.toString().contains("Number of reduces: 5\n"));
}
Example #10
Source File: HadoopShims.java From spork with Apache License 2.0
public static boolean isJobFailed(TaskReport report) {
    return report.getCurrentStatus() == TIPStatus.FAILED;
}
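Unlike the float comparison in Example #7, this variant checks the task's TIPStatus, which distinguishes tasks that actually failed from ones that are merely incomplete. A hedged usage sketch (jobClient and jobId are placeholders, not names from the source):

// Illustrative only: log every failed map task of a finished job.
for (TaskReport report : jobClient.getMapTaskReports(jobId)) {
    if (isJobFailed(report)) {
        System.err.println("Failed task: " + report.getTaskID());
    }
}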