Java Code Examples for org.apache.hadoop.mapreduce.v2.util.MRBuilderUtils#newTaskAttemptId()
The following examples show how to use
org.apache.hadoop.mapreduce.v2.util.MRBuilderUtils#newTaskAttemptId().
Each example is taken from an open-source project; the originating project and source file are noted above the code.
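Across all of the examples the same ID chain appears: an ApplicationId is wrapped into a JobId, the JobId into a TaskId, and the TaskId into a TaskAttemptId. The following is a minimal, self-contained sketch of that chain, condensed from the usage shown below; the class name IdChainSketch and the main method are illustrative additions, while the MRBuilderUtils and ApplicationId calls mirror the examples.

import org.apache.hadoop.mapreduce.v2.api.records.JobId;
import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId;
import org.apache.hadoop.mapreduce.v2.api.records.TaskId;
import org.apache.hadoop.mapreduce.v2.api.records.TaskType;
import org.apache.hadoop.mapreduce.v2.util.MRBuilderUtils;
import org.apache.hadoop.yarn.api.records.ApplicationId;

public class IdChainSketch {
  public static void main(String[] args) {
    // Cluster timestamp and application sequence number identify the YARN application.
    ApplicationId appId = ApplicationId.newInstance(12345L, 1);
    // JobId wraps the ApplicationId together with a job sequence number.
    JobId jobId = MRBuilderUtils.newJobId(appId, 1);
    // TaskId adds the task index and whether it is a map or a reduce task.
    TaskId taskId = MRBuilderUtils.newTaskId(jobId, 0, TaskType.MAP);
    // TaskAttemptId finally adds the attempt number for that task.
    TaskAttemptId attemptId = MRBuilderUtils.newTaskAttemptId(taskId, 0);
    // Printed form is illustrative only; no particular string format is asserted here.
    System.out.println(attemptId);
  }
}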
Example 1
Source File: TestLocalContainerLauncher.java From big-c with Apache License 2.0 | 5 votes |
@Test
public void testRenameMapOutputForReduce() throws Exception {
  final JobConf conf = new JobConf();
  final MROutputFiles mrOutputFiles = new MROutputFiles();
  mrOutputFiles.setConf(conf);

  // make sure both dirs are distinct
  //
  conf.set(MRConfig.LOCAL_DIR, localDirs[0].toString());
  final Path mapOut = mrOutputFiles.getOutputFileForWrite(1);
  conf.set(MRConfig.LOCAL_DIR, localDirs[1].toString());
  final Path mapOutIdx = mrOutputFiles.getOutputIndexFileForWrite(1);
  Assert.assertNotEquals("Paths must be different!",
      mapOut.getParent(), mapOutIdx.getParent());

  // make both dirs part of LOCAL_DIR
  conf.setStrings(MRConfig.LOCAL_DIR, localDirs);

  final FileContext lfc = FileContext.getLocalFSFileContext(conf);
  lfc.create(mapOut, EnumSet.of(CREATE)).close();
  lfc.create(mapOutIdx, EnumSet.of(CREATE)).close();

  final JobId jobId = MRBuilderUtils.newJobId(12345L, 1, 2);
  final TaskId tid = MRBuilderUtils.newTaskId(jobId, 0, TaskType.MAP);
  final TaskAttemptId taid = MRBuilderUtils.newTaskAttemptId(tid, 0);

  LocalContainerLauncher.renameMapOutputForReduce(conf, taid, mrOutputFiles);
}
Example 2
Source File: TestTaskAttemptListenerImpl.java From big-c with Apache License 2.0 | 5 votes |
private static TaskAttemptCompletionEvent createTce(int eventId,
    boolean isMap, TaskAttemptCompletionEventStatus status) {
  JobId jid = MRBuilderUtils.newJobId(12345, 1, 1);
  TaskId tid = MRBuilderUtils.newTaskId(jid, 0, isMap
      ? org.apache.hadoop.mapreduce.v2.api.records.TaskType.MAP
      : org.apache.hadoop.mapreduce.v2.api.records.TaskType.REDUCE);
  TaskAttemptId attemptId = MRBuilderUtils.newTaskAttemptId(tid, 0);
  RecordFactory recordFactory = RecordFactoryProvider.getRecordFactory(null);
  TaskAttemptCompletionEvent tce = recordFactory
      .newRecordInstance(TaskAttemptCompletionEvent.class);
  tce.setEventId(eventId);
  tce.setAttemptId(attemptId);
  tce.setStatus(status);
  return tce;
}
Example 3
Source File: TestRMContainerAllocator.java From hadoop with Apache License 2.0 | 5 votes |
private ContainerFailedEvent createFailEvent(JobId jobId, int taskAttemptId,
    String host, boolean reduce) {
  TaskId taskId;
  if (reduce) {
    taskId = MRBuilderUtils.newTaskId(jobId, 0, TaskType.REDUCE);
  } else {
    taskId = MRBuilderUtils.newTaskId(jobId, 0, TaskType.MAP);
  }
  TaskAttemptId attemptId = MRBuilderUtils.newTaskAttemptId(taskId,
      taskAttemptId);
  return new ContainerFailedEvent(attemptId, host);
}
Example 4
Source File: TestJobHistoryEntities.java From hadoop with Apache License 2.0 | 5 votes |
@Test (timeout=10000)
public void testCompletedTaskAttempt() throws Exception {
  HistoryFileInfo info = mock(HistoryFileInfo.class);
  when(info.getConfFile()).thenReturn(fullConfPath);
  completedJob = new CompletedJob(conf, jobId, fullHistoryPath, loadTasks,
      "user", info, jobAclsManager);
  TaskId mt1Id = MRBuilderUtils.newTaskId(jobId, 0, TaskType.MAP);
  TaskId rt1Id = MRBuilderUtils.newTaskId(jobId, 0, TaskType.REDUCE);
  TaskAttemptId mta1Id = MRBuilderUtils.newTaskAttemptId(mt1Id, 0);
  TaskAttemptId rta1Id = MRBuilderUtils.newTaskAttemptId(rt1Id, 0);

  Task mt1 = completedJob.getTask(mt1Id);
  Task rt1 = completedJob.getTask(rt1Id);

  TaskAttempt mta1 = mt1.getAttempt(mta1Id);
  assertEquals(TaskAttemptState.SUCCEEDED, mta1.getState());
  assertEquals("localhost:45454", mta1.getAssignedContainerMgrAddress());
  assertEquals("localhost:9999", mta1.getNodeHttpAddress());
  TaskAttemptReport mta1Report = mta1.getReport();
  assertEquals(TaskAttemptState.SUCCEEDED, mta1Report.getTaskAttemptState());
  assertEquals("localhost", mta1Report.getNodeManagerHost());
  assertEquals(45454, mta1Report.getNodeManagerPort());
  assertEquals(9999, mta1Report.getNodeManagerHttpPort());

  TaskAttempt rta1 = rt1.getAttempt(rta1Id);
  assertEquals(TaskAttemptState.SUCCEEDED, rta1.getState());
  assertEquals("localhost:45454", rta1.getAssignedContainerMgrAddress());
  assertEquals("localhost:9999", rta1.getNodeHttpAddress());
  TaskAttemptReport rta1Report = rta1.getReport();
  assertEquals(TaskAttemptState.SUCCEEDED, rta1Report.getTaskAttemptState());
  assertEquals("localhost", rta1Report.getNodeManagerHost());
  assertEquals(45454, rta1Report.getNodeManagerPort());
  assertEquals(9999, rta1Report.getNodeManagerHttpPort());
}
Example 5
Source File: TestContainerLauncherImpl.java From hadoop with Apache License 2.0 | 5 votes |
public static TaskAttemptId makeTaskAttemptId(long ts, int appId,
    int taskId, TaskType taskType, int id) {
  ApplicationId aID = ApplicationId.newInstance(ts, appId);
  JobId jID = MRBuilderUtils.newJobId(aID, id);
  TaskId tID = MRBuilderUtils.newTaskId(jID, taskId, taskType);
  return MRBuilderUtils.newTaskAttemptId(tID, id);
}
Example 6
Source File: TestTaskAttemptListenerImpl.java From hadoop with Apache License 2.0 | 5 votes |
private static TaskAttemptCompletionEvent createTce(int eventId,
    boolean isMap, TaskAttemptCompletionEventStatus status) {
  JobId jid = MRBuilderUtils.newJobId(12345, 1, 1);
  TaskId tid = MRBuilderUtils.newTaskId(jid, 0, isMap
      ? org.apache.hadoop.mapreduce.v2.api.records.TaskType.MAP
      : org.apache.hadoop.mapreduce.v2.api.records.TaskType.REDUCE);
  TaskAttemptId attemptId = MRBuilderUtils.newTaskAttemptId(tid, 0);
  RecordFactory recordFactory = RecordFactoryProvider.getRecordFactory(null);
  TaskAttemptCompletionEvent tce = recordFactory
      .newRecordInstance(TaskAttemptCompletionEvent.class);
  tce.setEventId(eventId);
  tce.setAttemptId(attemptId);
  tce.setStatus(status);
  return tce;
}
Example 7
Source File: TestTaskHeartbeatHandler.java From big-c with Apache License 2.0 | 5 votes |
@SuppressWarnings({ "rawtypes", "unchecked" }) @Test public void testTimeout() throws InterruptedException { EventHandler mockHandler = mock(EventHandler.class); Clock clock = new SystemClock(); TaskHeartbeatHandler hb = new TaskHeartbeatHandler(mockHandler, clock, 1); Configuration conf = new Configuration(); conf.setInt(MRJobConfig.TASK_TIMEOUT, 10); //10 ms conf.setInt(MRJobConfig.TASK_TIMEOUT_CHECK_INTERVAL_MS, 10); //10 ms hb.init(conf); hb.start(); try { ApplicationId appId = ApplicationId.newInstance(0l, 5); JobId jobId = MRBuilderUtils.newJobId(appId, 4); TaskId tid = MRBuilderUtils.newTaskId(jobId, 3, TaskType.MAP); TaskAttemptId taid = MRBuilderUtils.newTaskAttemptId(tid, 2); hb.register(taid); Thread.sleep(100); //Events only happen when the task is canceled verify(mockHandler, times(2)).handle(any(Event.class)); } finally { hb.stop(); } }
Example 8
Source File: TestRMContainerAllocator.java From big-c with Apache License 2.0 | 5 votes |
private ContainerFailedEvent createFailEvent(JobId jobId, int taskAttemptId,
    String host, boolean reduce) {
  TaskId taskId;
  if (reduce) {
    taskId = MRBuilderUtils.newTaskId(jobId, 0, TaskType.REDUCE);
  } else {
    taskId = MRBuilderUtils.newTaskId(jobId, 0, TaskType.MAP);
  }
  TaskAttemptId attemptId = MRBuilderUtils.newTaskAttemptId(taskId,
      taskAttemptId);
  return new ContainerFailedEvent(attemptId, host);
}
Example 9
Source File: TestContainerLauncherImpl.java From big-c with Apache License 2.0 | 5 votes |
public static TaskAttemptId makeTaskAttemptId(long ts, int appId,
    int taskId, TaskType taskType, int id) {
  ApplicationId aID = ApplicationId.newInstance(ts, appId);
  JobId jID = MRBuilderUtils.newJobId(aID, id);
  TaskId tID = MRBuilderUtils.newTaskId(jID, taskId, taskType);
  return MRBuilderUtils.newTaskAttemptId(tID, id);
}
Example 10
Source File: TestContainerLauncher.java From big-c with Apache License 2.0 | 4 votes |
@Test(timeout = 5000)
public void testPoolLimits() throws InterruptedException {
  ApplicationId appId = ApplicationId.newInstance(12345, 67);
  ApplicationAttemptId appAttemptId = ApplicationAttemptId.newInstance(
      appId, 3);
  JobId jobId = MRBuilderUtils.newJobId(appId, 8);
  TaskId taskId = MRBuilderUtils.newTaskId(jobId, 9, TaskType.MAP);
  TaskAttemptId taskAttemptId = MRBuilderUtils.newTaskAttemptId(taskId, 0);
  ContainerId containerId = ContainerId.newContainerId(appAttemptId, 10);

  AppContext context = mock(AppContext.class);
  CustomContainerLauncher containerLauncher = new CustomContainerLauncher(
      context);
  Configuration conf = new Configuration();
  conf.setInt(MRJobConfig.MR_AM_CONTAINERLAUNCHER_THREAD_COUNT_LIMIT, 12);
  containerLauncher.init(conf);
  containerLauncher.start();

  ThreadPoolExecutor threadPool = containerLauncher.getThreadPool();

  // 10 different hosts
  containerLauncher.expectedCorePoolSize = containerLauncher.initialPoolSize;
  for (int i = 0; i < 10; i++) {
    containerLauncher.handle(new ContainerLauncherEvent(taskAttemptId,
        containerId, "host" + i + ":1234", null,
        ContainerLauncher.EventType.CONTAINER_REMOTE_LAUNCH));
  }
  waitForEvents(containerLauncher, 10);
  Assert.assertEquals(10, threadPool.getPoolSize());
  Assert.assertNull(containerLauncher.foundErrors);

  // 4 more different hosts, but thread pool size should be capped at 12
  containerLauncher.expectedCorePoolSize = 12;
  for (int i = 1; i <= 4; i++) {
    containerLauncher.handle(new ContainerLauncherEvent(taskAttemptId,
        containerId, "host1" + i + ":1234", null,
        ContainerLauncher.EventType.CONTAINER_REMOTE_LAUNCH));
  }
  waitForEvents(containerLauncher, 12);
  Assert.assertEquals(12, threadPool.getPoolSize());
  Assert.assertNull(containerLauncher.foundErrors);

  // Make some threads idle so that remaining events are also done.
  containerLauncher.finishEventHandling = true;
  waitForEvents(containerLauncher, 14);
  Assert.assertEquals(12, threadPool.getPoolSize());
  Assert.assertNull(containerLauncher.foundErrors);
  containerLauncher.stop();
}
Example 11
Source File: TestJobInfo.java From hadoop with Apache License 2.0 | 4 votes |
@Test
public void testAverageReduceTime() {
  Job job = mock(CompletedJob.class);
  final Task task1 = mock(Task.class);
  final Task task2 = mock(Task.class);

  JobId jobId = MRBuilderUtils.newJobId(1L, 1, 1);
  final TaskId taskId1 = MRBuilderUtils.newTaskId(jobId, 1, TaskType.REDUCE);
  final TaskId taskId2 = MRBuilderUtils.newTaskId(jobId, 2, TaskType.REDUCE);

  final TaskAttemptId taskAttemptId1 = MRBuilderUtils.
      newTaskAttemptId(taskId1, 1);
  final TaskAttemptId taskAttemptId2 = MRBuilderUtils.
      newTaskAttemptId(taskId2, 2);

  final TaskAttempt taskAttempt1 = mock(TaskAttempt.class);
  final TaskAttempt taskAttempt2 = mock(TaskAttempt.class);

  JobReport jobReport = mock(JobReport.class);

  when(taskAttempt1.getState()).thenReturn(TaskAttemptState.SUCCEEDED);
  when(taskAttempt1.getLaunchTime()).thenReturn(0L);
  when(taskAttempt1.getShuffleFinishTime()).thenReturn(4L);
  when(taskAttempt1.getSortFinishTime()).thenReturn(6L);
  when(taskAttempt1.getFinishTime()).thenReturn(8L);

  when(taskAttempt2.getState()).thenReturn(TaskAttemptState.SUCCEEDED);
  when(taskAttempt2.getLaunchTime()).thenReturn(5L);
  when(taskAttempt2.getShuffleFinishTime()).thenReturn(10L);
  when(taskAttempt2.getSortFinishTime()).thenReturn(22L);
  when(taskAttempt2.getFinishTime()).thenReturn(42L);

  when(task1.getType()).thenReturn(TaskType.REDUCE);
  when(task2.getType()).thenReturn(TaskType.REDUCE);
  when(task1.getAttempts()).thenReturn(
      new HashMap<TaskAttemptId, TaskAttempt>() {{
        put(taskAttemptId1, taskAttempt1);
      }});
  when(task2.getAttempts()).thenReturn(
      new HashMap<TaskAttemptId, TaskAttempt>() {{
        put(taskAttemptId2, taskAttempt2);
      }});

  when(job.getTasks()).thenReturn(
      new HashMap<TaskId, Task>() {{
        put(taskId1, task1);
        put(taskId2, task2);
      }});
  when(job.getID()).thenReturn(jobId);
  when(job.getReport()).thenReturn(jobReport);
  when(job.getName()).thenReturn("TestJobInfo");
  when(job.getState()).thenReturn(JobState.SUCCEEDED);

  JobInfo jobInfo = new JobInfo(job);

  Assert.assertEquals(11L, jobInfo.getAvgReduceTime().longValue());
}
Example 12
Source File: TestTaskAttempt.java From big-c with Apache License 2.0 | 4 votes |
@Test
public void testContainerCleanedWhileCommitting() throws Exception {
  ApplicationId appId = ApplicationId.newInstance(1, 2);
  ApplicationAttemptId appAttemptId =
      ApplicationAttemptId.newInstance(appId, 0);
  JobId jobId = MRBuilderUtils.newJobId(appId, 1);
  TaskId taskId = MRBuilderUtils.newTaskId(jobId, 1, TaskType.MAP);
  TaskAttemptId attemptId = MRBuilderUtils.newTaskAttemptId(taskId, 0);
  Path jobFile = mock(Path.class);

  MockEventHandler eventHandler = new MockEventHandler();
  TaskAttemptListener taListener = mock(TaskAttemptListener.class);
  when(taListener.getAddress()).thenReturn(
      new InetSocketAddress("localhost", 0));

  JobConf jobConf = new JobConf();
  jobConf.setClass("fs.file.impl", StubbedFS.class, FileSystem.class);
  jobConf.setBoolean("fs.file.impl.disable.cache", true);
  jobConf.set(JobConf.MAPRED_MAP_TASK_ENV, "");
  jobConf.set(MRJobConfig.APPLICATION_ATTEMPT_ID, "10");

  TaskSplitMetaInfo splits = mock(TaskSplitMetaInfo.class);
  when(splits.getLocations()).thenReturn(new String[] {});

  AppContext appCtx = mock(AppContext.class);
  ClusterInfo clusterInfo = mock(ClusterInfo.class);
  Resource resource = mock(Resource.class);
  when(appCtx.getClusterInfo()).thenReturn(clusterInfo);
  when(resource.getMemory()).thenReturn(1024);

  TaskAttemptImpl taImpl = new MapTaskAttemptImpl(taskId, 1, eventHandler,
      jobFile, 1, splits, jobConf, taListener, new Token(),
      new Credentials(), new SystemClock(), appCtx);

  NodeId nid = NodeId.newInstance("127.0.0.1", 0);
  ContainerId contId = ContainerId.newContainerId(appAttemptId, 3);
  Container container = mock(Container.class);
  when(container.getId()).thenReturn(contId);
  when(container.getNodeId()).thenReturn(nid);
  when(container.getNodeHttpAddress()).thenReturn("localhost:0");

  taImpl.handle(new TaskAttemptEvent(attemptId,
      TaskAttemptEventType.TA_SCHEDULE));
  taImpl.handle(new TaskAttemptContainerAssignedEvent(attemptId,
      container, mock(Map.class)));
  taImpl.handle(new TaskAttemptContainerLaunchedEvent(attemptId, 0));
  taImpl.handle(new TaskAttemptEvent(attemptId,
      TaskAttemptEventType.TA_COMMIT_PENDING));

  assertEquals("Task attempt is not in commit pending state",
      taImpl.getState(), TaskAttemptState.COMMIT_PENDING);
  taImpl.handle(new TaskAttemptEvent(attemptId,
      TaskAttemptEventType.TA_CONTAINER_CLEANED));
  assertFalse("InternalError occurred trying to handle TA_CONTAINER_CLEANED",
      eventHandler.internalError);
  assertEquals("Task attempt is assigned locally", Locality.OFF_SWITCH,
      taImpl.getLocality());
}
Example 13
Source File: TestTaskAttempt.java From hadoop with Apache License 2.0 | 4 votes |
@Test
public void testContainerKillAfterAssigned() throws Exception {
  ApplicationId appId = ApplicationId.newInstance(1, 2);
  ApplicationAttemptId appAttemptId =
      ApplicationAttemptId.newInstance(appId, 0);
  JobId jobId = MRBuilderUtils.newJobId(appId, 1);
  TaskId taskId = MRBuilderUtils.newTaskId(jobId, 1, TaskType.MAP);
  TaskAttemptId attemptId = MRBuilderUtils.newTaskAttemptId(taskId, 0);
  Path jobFile = mock(Path.class);

  MockEventHandler eventHandler = new MockEventHandler();
  TaskAttemptListener taListener = mock(TaskAttemptListener.class);
  when(taListener.getAddress()).thenReturn(
      new InetSocketAddress("localhost", 0));

  JobConf jobConf = new JobConf();
  jobConf.setClass("fs.file.impl", StubbedFS.class, FileSystem.class);
  jobConf.setBoolean("fs.file.impl.disable.cache", true);
  jobConf.set(JobConf.MAPRED_MAP_TASK_ENV, "");
  jobConf.set(MRJobConfig.APPLICATION_ATTEMPT_ID, "10");

  TaskSplitMetaInfo splits = mock(TaskSplitMetaInfo.class);
  when(splits.getLocations()).thenReturn(new String[] { "127.0.0.1" });

  AppContext appCtx = mock(AppContext.class);
  ClusterInfo clusterInfo = mock(ClusterInfo.class);
  Resource resource = mock(Resource.class);
  when(appCtx.getClusterInfo()).thenReturn(clusterInfo);
  when(resource.getMemory()).thenReturn(1024);

  TaskAttemptImpl taImpl = new MapTaskAttemptImpl(taskId, 1, eventHandler,
      jobFile, 1, splits, jobConf, taListener, new Token(),
      new Credentials(), new SystemClock(), appCtx);

  NodeId nid = NodeId.newInstance("127.0.0.2", 0);
  ContainerId contId = ContainerId.newContainerId(appAttemptId, 3);
  Container container = mock(Container.class);
  when(container.getId()).thenReturn(contId);
  when(container.getNodeId()).thenReturn(nid);
  when(container.getNodeHttpAddress()).thenReturn("localhost:0");

  taImpl.handle(new TaskAttemptEvent(attemptId,
      TaskAttemptEventType.TA_SCHEDULE));
  taImpl.handle(new TaskAttemptContainerAssignedEvent(attemptId,
      container, mock(Map.class)));
  assertEquals("Task attempt is not in assinged state",
      taImpl.getInternalState(), TaskAttemptStateInternal.ASSIGNED);
  taImpl.handle(new TaskAttemptEvent(attemptId,
      TaskAttemptEventType.TA_KILL));
  assertEquals("Task should be in KILLED state",
      TaskAttemptStateInternal.KILL_CONTAINER_CLEANUP,
      taImpl.getInternalState());
}
Example 14
Source File: TestIds.java From hadoop with Apache License 2.0 | 4 votes |
private TaskAttemptId createTaskAttemptId(long clusterTimestamp, int jobIdInt,
    int taskIdInt, TaskType taskType, int taskAttemptIdInt) {
  return MRBuilderUtils.newTaskAttemptId(
      createTaskId(clusterTimestamp, jobIdInt, taskIdInt, taskType),
      taskAttemptIdInt);
}
Example 15
Source File: TestRMContainerAllocator.java From big-c with Apache License 2.0 | 4 votes |
@Test
public void testCompletedContainerEvent() {
  RMContainerAllocator allocator = new RMContainerAllocator(
      mock(ClientService.class), mock(AppContext.class));

  TaskAttemptId attemptId = MRBuilderUtils.newTaskAttemptId(
      MRBuilderUtils.newTaskId(
          MRBuilderUtils.newJobId(1, 1, 1), 1, TaskType.MAP), 1);

  ApplicationId applicationId = ApplicationId.newInstance(1, 1);
  ApplicationAttemptId applicationAttemptId =
      ApplicationAttemptId.newInstance(applicationId, 1);
  ContainerId containerId =
      ContainerId.newContainerId(applicationAttemptId, 1);
  ContainerStatus status = ContainerStatus.newInstance(
      containerId, ContainerState.RUNNING, "", 0);

  ContainerStatus abortedStatus = ContainerStatus.newInstance(
      containerId, ContainerState.RUNNING, "",
      ContainerExitStatus.ABORTED);

  TaskAttemptEvent event = allocator.createContainerFinishedEvent(status,
      attemptId);
  Assert.assertEquals(TaskAttemptEventType.TA_CONTAINER_COMPLETED,
      event.getType());

  TaskAttemptEvent abortedEvent = allocator.createContainerFinishedEvent(
      abortedStatus, attemptId);
  Assert.assertEquals(TaskAttemptEventType.TA_KILL, abortedEvent.getType());

  ContainerId containerId2 =
      ContainerId.newContainerId(applicationAttemptId, 2);
  ContainerStatus status2 = ContainerStatus.newInstance(containerId2,
      ContainerState.RUNNING, "", 0);

  ContainerStatus preemptedStatus = ContainerStatus.newInstance(containerId2,
      ContainerState.RUNNING, "", ContainerExitStatus.PREEMPTED);

  TaskAttemptEvent event2 = allocator.createContainerFinishedEvent(status2,
      attemptId);
  Assert.assertEquals(TaskAttemptEventType.TA_CONTAINER_COMPLETED,
      event2.getType());

  TaskAttemptEvent abortedEvent2 = allocator.createContainerFinishedEvent(
      preemptedStatus, attemptId);
  Assert.assertEquals(TaskAttemptEventType.TA_KILL, abortedEvent2.getType());
}
Example 16
Source File: TestContainerLauncher.java From hadoop with Apache License 2.0 | 4 votes |
@Test(timeout = 5000)
public void testPoolLimits() throws InterruptedException {
  ApplicationId appId = ApplicationId.newInstance(12345, 67);
  ApplicationAttemptId appAttemptId = ApplicationAttemptId.newInstance(
      appId, 3);
  JobId jobId = MRBuilderUtils.newJobId(appId, 8);
  TaskId taskId = MRBuilderUtils.newTaskId(jobId, 9, TaskType.MAP);
  TaskAttemptId taskAttemptId = MRBuilderUtils.newTaskAttemptId(taskId, 0);
  ContainerId containerId = ContainerId.newContainerId(appAttemptId, 10);

  AppContext context = mock(AppContext.class);
  CustomContainerLauncher containerLauncher = new CustomContainerLauncher(
      context);
  Configuration conf = new Configuration();
  conf.setInt(MRJobConfig.MR_AM_CONTAINERLAUNCHER_THREAD_COUNT_LIMIT, 12);
  containerLauncher.init(conf);
  containerLauncher.start();

  ThreadPoolExecutor threadPool = containerLauncher.getThreadPool();

  // 10 different hosts
  containerLauncher.expectedCorePoolSize = containerLauncher.initialPoolSize;
  for (int i = 0; i < 10; i++) {
    containerLauncher.handle(new ContainerLauncherEvent(taskAttemptId,
        containerId, "host" + i + ":1234", null,
        ContainerLauncher.EventType.CONTAINER_REMOTE_LAUNCH));
  }
  waitForEvents(containerLauncher, 10);
  Assert.assertEquals(10, threadPool.getPoolSize());
  Assert.assertNull(containerLauncher.foundErrors);

  // 4 more different hosts, but thread pool size should be capped at 12
  containerLauncher.expectedCorePoolSize = 12;
  for (int i = 1; i <= 4; i++) {
    containerLauncher.handle(new ContainerLauncherEvent(taskAttemptId,
        containerId, "host1" + i + ":1234", null,
        ContainerLauncher.EventType.CONTAINER_REMOTE_LAUNCH));
  }
  waitForEvents(containerLauncher, 12);
  Assert.assertEquals(12, threadPool.getPoolSize());
  Assert.assertNull(containerLauncher.foundErrors);

  // Make some threads idle so that remaining events are also done.
  containerLauncher.finishEventHandling = true;
  waitForEvents(containerLauncher, 14);
  Assert.assertEquals(12, threadPool.getPoolSize());
  Assert.assertNull(containerLauncher.foundErrors);
  containerLauncher.stop();
}
Example 17
Source File: TestTaskAttempt.java From hadoop with Apache License 2.0 | 4 votes |
@Test
public void testContainerCleanedWhileRunning() throws Exception {
  ApplicationId appId = ApplicationId.newInstance(1, 2);
  ApplicationAttemptId appAttemptId =
      ApplicationAttemptId.newInstance(appId, 0);
  JobId jobId = MRBuilderUtils.newJobId(appId, 1);
  TaskId taskId = MRBuilderUtils.newTaskId(jobId, 1, TaskType.MAP);
  TaskAttemptId attemptId = MRBuilderUtils.newTaskAttemptId(taskId, 0);
  Path jobFile = mock(Path.class);

  MockEventHandler eventHandler = new MockEventHandler();
  TaskAttemptListener taListener = mock(TaskAttemptListener.class);
  when(taListener.getAddress()).thenReturn(
      new InetSocketAddress("localhost", 0));

  JobConf jobConf = new JobConf();
  jobConf.setClass("fs.file.impl", StubbedFS.class, FileSystem.class);
  jobConf.setBoolean("fs.file.impl.disable.cache", true);
  jobConf.set(JobConf.MAPRED_MAP_TASK_ENV, "");
  jobConf.set(MRJobConfig.APPLICATION_ATTEMPT_ID, "10");

  TaskSplitMetaInfo splits = mock(TaskSplitMetaInfo.class);
  when(splits.getLocations()).thenReturn(new String[] {"127.0.0.1"});

  AppContext appCtx = mock(AppContext.class);
  ClusterInfo clusterInfo = mock(ClusterInfo.class);
  Resource resource = mock(Resource.class);
  when(appCtx.getClusterInfo()).thenReturn(clusterInfo);
  when(resource.getMemory()).thenReturn(1024);

  TaskAttemptImpl taImpl = new MapTaskAttemptImpl(taskId, 1, eventHandler,
      jobFile, 1, splits, jobConf, taListener, new Token(),
      new Credentials(), new SystemClock(), appCtx);

  NodeId nid = NodeId.newInstance("127.0.0.2", 0);
  ContainerId contId = ContainerId.newContainerId(appAttemptId, 3);
  Container container = mock(Container.class);
  when(container.getId()).thenReturn(contId);
  when(container.getNodeId()).thenReturn(nid);
  when(container.getNodeHttpAddress()).thenReturn("localhost:0");

  taImpl.handle(new TaskAttemptEvent(attemptId,
      TaskAttemptEventType.TA_SCHEDULE));
  taImpl.handle(new TaskAttemptContainerAssignedEvent(attemptId,
      container, mock(Map.class)));
  taImpl.handle(new TaskAttemptContainerLaunchedEvent(attemptId, 0));
  assertEquals("Task attempt is not in running state", taImpl.getState(),
      TaskAttemptState.RUNNING);
  taImpl.handle(new TaskAttemptEvent(attemptId,
      TaskAttemptEventType.TA_CONTAINER_CLEANED));
  assertFalse("InternalError occurred trying to handle TA_CONTAINER_CLEANED",
      eventHandler.internalError);
  assertEquals("Task attempt is not assigned on the local rack",
      Locality.RACK_LOCAL, taImpl.getLocality());
}
Example 18
Source File: TestTaskAttempt.java From big-c with Apache License 2.0 | 4 votes |
@Test
public void testAppDiognosticEventOnUnassignedTask() throws Exception {
  ApplicationId appId = ApplicationId.newInstance(1, 2);
  ApplicationAttemptId appAttemptId = ApplicationAttemptId.newInstance(
      appId, 0);
  JobId jobId = MRBuilderUtils.newJobId(appId, 1);
  TaskId taskId = MRBuilderUtils.newTaskId(jobId, 1, TaskType.MAP);
  TaskAttemptId attemptId = MRBuilderUtils.newTaskAttemptId(taskId, 0);
  Path jobFile = mock(Path.class);

  MockEventHandler eventHandler = new MockEventHandler();
  TaskAttemptListener taListener = mock(TaskAttemptListener.class);
  when(taListener.getAddress()).thenReturn(
      new InetSocketAddress("localhost", 0));

  JobConf jobConf = new JobConf();
  jobConf.setClass("fs.file.impl", StubbedFS.class, FileSystem.class);
  jobConf.setBoolean("fs.file.impl.disable.cache", true);
  jobConf.set(JobConf.MAPRED_MAP_TASK_ENV, "");
  jobConf.set(MRJobConfig.APPLICATION_ATTEMPT_ID, "10");

  TaskSplitMetaInfo splits = mock(TaskSplitMetaInfo.class);
  when(splits.getLocations()).thenReturn(new String[] { "127.0.0.1" });

  AppContext appCtx = mock(AppContext.class);
  ClusterInfo clusterInfo = mock(ClusterInfo.class);
  Resource resource = mock(Resource.class);
  when(appCtx.getClusterInfo()).thenReturn(clusterInfo);
  when(resource.getMemory()).thenReturn(1024);

  TaskAttemptImpl taImpl = new MapTaskAttemptImpl(taskId, 1, eventHandler,
      jobFile, 1, splits, jobConf, taListener, new Token(),
      new Credentials(), new SystemClock(), appCtx);

  NodeId nid = NodeId.newInstance("127.0.0.1", 0);
  ContainerId contId = ContainerId.newContainerId(appAttemptId, 3);
  Container container = mock(Container.class);
  when(container.getId()).thenReturn(contId);
  when(container.getNodeId()).thenReturn(nid);
  when(container.getNodeHttpAddress()).thenReturn("localhost:0");

  taImpl.handle(new TaskAttemptEvent(attemptId,
      TaskAttemptEventType.TA_SCHEDULE));
  taImpl.handle(new TaskAttemptDiagnosticsUpdateEvent(attemptId,
      "Task got killed"));
  assertFalse(
      "InternalError occurred trying to handle TA_DIAGNOSTICS_UPDATE on assigned task",
      eventHandler.internalError);
}
Example 19
Source File: TestTaskAttempt.java From hadoop with Apache License 2.0 | 4 votes |
@Test
public void testLaunchFailedWhileKilling() throws Exception {
  ApplicationId appId = ApplicationId.newInstance(1, 2);
  ApplicationAttemptId appAttemptId =
      ApplicationAttemptId.newInstance(appId, 0);
  JobId jobId = MRBuilderUtils.newJobId(appId, 1);
  TaskId taskId = MRBuilderUtils.newTaskId(jobId, 1, TaskType.MAP);
  TaskAttemptId attemptId = MRBuilderUtils.newTaskAttemptId(taskId, 0);
  Path jobFile = mock(Path.class);

  MockEventHandler eventHandler = new MockEventHandler();
  TaskAttemptListener taListener = mock(TaskAttemptListener.class);
  when(taListener.getAddress()).thenReturn(
      new InetSocketAddress("localhost", 0));

  JobConf jobConf = new JobConf();
  jobConf.setClass("fs.file.impl", StubbedFS.class, FileSystem.class);
  jobConf.setBoolean("fs.file.impl.disable.cache", true);
  jobConf.set(JobConf.MAPRED_MAP_TASK_ENV, "");
  jobConf.set(MRJobConfig.APPLICATION_ATTEMPT_ID, "10");

  TaskSplitMetaInfo splits = mock(TaskSplitMetaInfo.class);
  when(splits.getLocations()).thenReturn(new String[] {"127.0.0.1"});

  TaskAttemptImpl taImpl = new MapTaskAttemptImpl(taskId, 1, eventHandler,
      jobFile, 1, splits, jobConf, taListener, new Token(),
      new Credentials(), new SystemClock(), null);

  NodeId nid = NodeId.newInstance("127.0.0.1", 0);
  ContainerId contId = ContainerId.newContainerId(appAttemptId, 3);
  Container container = mock(Container.class);
  when(container.getId()).thenReturn(contId);
  when(container.getNodeId()).thenReturn(nid);

  taImpl.handle(new TaskAttemptEvent(attemptId,
      TaskAttemptEventType.TA_SCHEDULE));
  taImpl.handle(new TaskAttemptContainerAssignedEvent(attemptId,
      container, mock(Map.class)));
  taImpl.handle(new TaskAttemptEvent(attemptId,
      TaskAttemptEventType.TA_KILL));
  taImpl.handle(new TaskAttemptEvent(attemptId,
      TaskAttemptEventType.TA_CONTAINER_CLEANED));
  taImpl.handle(new TaskAttemptEvent(attemptId,
      TaskAttemptEventType.TA_CONTAINER_LAUNCH_FAILED));
  assertFalse(eventHandler.internalError);
  assertEquals("Task attempt is not assigned on the local node",
      Locality.NODE_LOCAL, taImpl.getLocality());
}
Example 20
Source File: TestTaskAttempt.java From big-c with Apache License 2.0 | 4 votes |
@Test
public void testFetchFailureAttemptFinishTime() throws Exception {
  ApplicationId appId = ApplicationId.newInstance(1, 2);
  ApplicationAttemptId appAttemptId =
      ApplicationAttemptId.newInstance(appId, 0);
  JobId jobId = MRBuilderUtils.newJobId(appId, 1);
  TaskId taskId = MRBuilderUtils.newTaskId(jobId, 1, TaskType.MAP);
  TaskAttemptId attemptId = MRBuilderUtils.newTaskAttemptId(taskId, 0);
  Path jobFile = mock(Path.class);

  MockEventHandler eventHandler = new MockEventHandler();
  TaskAttemptListener taListener = mock(TaskAttemptListener.class);
  when(taListener.getAddress()).thenReturn(
      new InetSocketAddress("localhost", 0));

  JobConf jobConf = new JobConf();
  jobConf.setClass("fs.file.impl", StubbedFS.class, FileSystem.class);
  jobConf.setBoolean("fs.file.impl.disable.cache", true);
  jobConf.set(JobConf.MAPRED_MAP_TASK_ENV, "");
  jobConf.set(MRJobConfig.APPLICATION_ATTEMPT_ID, "10");

  TaskSplitMetaInfo splits = mock(TaskSplitMetaInfo.class);
  when(splits.getLocations()).thenReturn(new String[] {"127.0.0.1"});

  AppContext appCtx = mock(AppContext.class);
  ClusterInfo clusterInfo = mock(ClusterInfo.class);
  when(appCtx.getClusterInfo()).thenReturn(clusterInfo);

  TaskAttemptImpl taImpl = new MapTaskAttemptImpl(taskId, 1, eventHandler,
      jobFile, 1, splits, jobConf, taListener, mock(Token.class),
      new Credentials(), new SystemClock(), appCtx);

  NodeId nid = NodeId.newInstance("127.0.0.1", 0);
  ContainerId contId = ContainerId.newContainerId(appAttemptId, 3);
  Container container = mock(Container.class);
  when(container.getId()).thenReturn(contId);
  when(container.getNodeId()).thenReturn(nid);
  when(container.getNodeHttpAddress()).thenReturn("localhost:0");

  taImpl.handle(new TaskAttemptEvent(attemptId,
      TaskAttemptEventType.TA_SCHEDULE));
  taImpl.handle(new TaskAttemptContainerAssignedEvent(attemptId,
      container, mock(Map.class)));
  taImpl.handle(new TaskAttemptContainerLaunchedEvent(attemptId, 0));
  taImpl.handle(new TaskAttemptEvent(attemptId,
      TaskAttemptEventType.TA_DONE));
  taImpl.handle(new TaskAttemptEvent(attemptId,
      TaskAttemptEventType.TA_CONTAINER_CLEANED));

  assertEquals("Task attempt is not in succeeded state", taImpl.getState(),
      TaskAttemptState.SUCCEEDED);
  assertTrue("Task Attempt finish time is not greater than 0",
      taImpl.getFinishTime() > 0);

  Long finishTime = taImpl.getFinishTime();
  Thread.sleep(5);
  taImpl.handle(new TaskAttemptEvent(attemptId,
      TaskAttemptEventType.TA_TOO_MANY_FETCH_FAILURE));

  assertEquals("Task attempt is not in Too Many Fetch Failure state",
      taImpl.getState(), TaskAttemptState.FAILED);
  assertEquals("After TA_TOO_MANY_FETCH_FAILURE,"
      + " Task attempt finish time is not the same ",
      finishTime, Long.valueOf(taImpl.getFinishTime()));
}