org.apache.hadoop.tools.rumen.MapTaskAttemptInfo Java Examples
The following examples show how to use org.apache.hadoop.tools.rumen.MapTaskAttemptInfo, the Rumen class that describes a single map task attempt in a job trace: its completion state, its I/O statistics (as a TaskInfo), and its runtime.
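Before the examples, here is a minimal sketch of the class's basic shape. It assumes the three-argument constructor that the examples below use (state, TaskInfo, runtime in milliseconds) and the getRuntime() accessor from the Rumen API; the byte and record counts are illustrative values, not taken from the examples.

import org.apache.hadoop.mapred.TaskStatus.State;
import org.apache.hadoop.tools.rumen.MapTaskAttemptInfo;
import org.apache.hadoop.tools.rumen.TaskInfo;

public class MapTaskAttemptInfoSketch {
  public static void main(String[] args) {
    // TaskInfo(bytesIn, recsIn, bytesOut, recsOut, maxMemory);
    // -1 for maxMemory means "unknown", as in the examples below.
    TaskInfo taskInfo = new TaskInfo(1024L, 10, 2048L, 10, -1L);

    // State, task-level I/O info, and the map runtime in milliseconds.
    // This constructor is deprecated in some Hadoop versions, which may be
    // why Example #1 carries @SuppressWarnings("deprecation").
    MapTaskAttemptInfo attempt =
        new MapTaskAttemptInfo(State.SUCCEEDED, taskInfo, 100L);

    System.out.println(attempt.getRuntime());  // total attempt runtime (100)
  }
}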
Example #1
Source File: DebugJobProducer.java From hadoop with Apache License 2.0 (an identical copy of this example appears in the big-c project, also under Apache License 2.0)
@SuppressWarnings({ "deprecation", "incomplete-switch" })
@Override
public TaskAttemptInfo getTaskAttemptInfo(
    TaskType taskType, int taskNumber, int taskAttemptNumber) {
  switch (taskType) {
    case MAP:
      return new MapTaskAttemptInfo(
          State.SUCCEEDED,
          new TaskInfo(
              m_bytesIn[taskNumber], m_recsIn[taskNumber],
              m_bytesOut[taskNumber], m_recsOut[taskNumber], -1),
          100);
    case REDUCE:
      return new ReduceTaskAttemptInfo(
          State.SUCCEEDED,
          new TaskInfo(
              r_bytesIn[taskNumber], r_recsIn[taskNumber],
              r_bytesOut[taskNumber], r_recsOut[taskNumber], -1),
          100, 100, 100);
  }
  throw new UnsupportedOperationException();
}
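One detail worth noting: the single trailing 100 on the MAP branch is the whole map runtime, while the three trailing 100s on the REDUCE branch are, assuming the parameter order of ReduceTaskAttemptInfo's constructor in the Rumen source, the shuffle, merge (sort), and reduce phase runtimes. A minimal sketch of just that contrast, with illustrative values:

// Assumes the imports from the sketch above, plus
// org.apache.hadoop.tools.rumen.TaskAttemptInfo and
// org.apache.hadoop.tools.rumen.ReduceTaskAttemptInfo.
TaskInfo info = new TaskInfo(0L, 0, 0L, 0, -1L);

// Map attempts carry a single runtime; reduce attempts split theirs into phases.
TaskAttemptInfo map = new MapTaskAttemptInfo(State.SUCCEEDED, info, 100L);
TaskAttemptInfo red = new ReduceTaskAttemptInfo(State.SUCCEEDED, info,
    100L,   // shuffle time (ms)
    100L,   // merge/sort time (ms)
    100L);  // reduce time (ms)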
Example #2
Source File: MockSimulatorJobTracker.java From RDFS with Apache License 2.0
public void runMapTask(String taskTrackerName, TaskAttemptID taskId,
                       long mapStart, long mapRuntime, long killHeartbeat) {
  long mapDone = mapStart + mapRuntime;
  long mapEndHeartbeat = nextHeartbeat(mapDone);
  final boolean isKilled = (killHeartbeat >= 0);
  if (isKilled) {
    // A kill overrides normal completion: the attempt ends on the first
    // heartbeat after the kill is delivered.
    mapEndHeartbeat = nextHeartbeat(killHeartbeat + 1);
  }
  LOG.debug("mapStart=" + mapStart + ", mapDone=" + mapDone
      + ", mapEndHeartbeat=" + mapEndHeartbeat
      + ", killHeartbeat=" + killHeartbeat);

  final int numSlotsRequired = 1;
  org.apache.hadoop.mapred.TaskAttemptID taskIdOldApi =
      org.apache.hadoop.mapred.TaskAttemptID.downgrade(taskId);
  Task task = new MapTask("dummyjobfile", taskIdOldApi, 0, "dummysplitclass",
                          null, numSlotsRequired);
  // All byte counters are 0; only the runtime matters to the simulator here.
  TaskInfo taskInfo = new TaskInfo(0, 0, 0, 0, 0);
  MapTaskAttemptInfo taskAttemptInfo =
      new MapTaskAttemptInfo(State.SUCCEEDED, taskInfo, mapRuntime);

  // Schedule the launch on the heartbeat at mapStart, and the kill (if any)
  // on the heartbeat at killHeartbeat.
  TaskTrackerAction action =
      new SimulatorLaunchTaskAction(task, taskAttemptInfo);
  heartbeats.get(mapStart).get(taskTrackerName).addTaskTrackerAction(action);
  if (isKilled) {
    action = new KillTaskAction(taskIdOldApi);
    heartbeats.get(killHeartbeat).get(taskTrackerName)
        .addTaskTrackerAction(action);
  }

  // Report RUNNING on every intermediate heartbeat, then SUCCEEDED (or
  // KILLED) on the final one.
  for (long simulationTime = mapStart + heartbeatInterval;
       simulationTime <= mapEndHeartbeat;
       simulationTime += heartbeatInterval) {
    State state =
        simulationTime < mapEndHeartbeat ? State.RUNNING : State.SUCCEEDED;
    if (simulationTime == mapEndHeartbeat && isKilled) {
      state = State.KILLED;
    }
    MapTaskStatus mapStatus = new MapTaskStatus(
        task.getTaskID(), 0.0f, 0, state, "", "", null, Phase.MAP, null);
    heartbeats.get(simulationTime).get(taskTrackerName)
        .addTaskReport(mapStatus);
  }
}
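For orientation, a hypothetical call into this helper from a simulator test might look like the following. Here mock, attemptId, the tracker names, and the timestamps are illustrative stand-ins, not names from the original test, and mapStart and killHeartbeat must line up with entries in the heartbeats table:

// Successful map: killHeartbeat < 0, so the attempt is reported RUNNING on
// intermediate heartbeats and SUCCEEDED on the first heartbeat at or after
// mapStart + mapRuntime.
mock.runMapTask("tracker_host1:7070", attemptId, 10000L, 5000L, -1L);

// Killed map: a KillTaskAction is queued at killHeartbeat, and the final
// status reported for the attempt is KILLED instead of SUCCEEDED.
mock.runMapTask("tracker_host2:7070", attemptId2, 10000L, 5000L, 12000L);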