org.apache.hadoop.yarn.util.SystemClock Java Examples
The following examples show how to use org.apache.hadoop.yarn.util.SystemClock. Each example is taken from an open-source project; the header above the code identifies the original source file, the project it comes from, and its license.
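As a quick orientation before the project-specific examples, here is a minimal, self-contained sketch of the usage pattern they all share (the class name SystemClockDemo is only an illustrative placeholder, not part of Hadoop): SystemClock implements the YARN Clock interface, and getTime() reports the current wall-clock time, so an instance created with new SystemClock() can be passed anywhere a Clock is expected.

import org.apache.hadoop.yarn.util.Clock;
import org.apache.hadoop.yarn.util.SystemClock;

public class SystemClockDemo {
  public static void main(String[] args) throws InterruptedException {
    // SystemClock is a wall-clock Clock implementation; getTime() returns
    // the current time in milliseconds.
    Clock clock = new SystemClock();
    long start = clock.getTime();
    Thread.sleep(50);
    System.out.println("Elapsed ms: " + (clock.getTime() - start));
  }
}

Accepting a Clock rather than calling System.currentTimeMillis() directly is what lets the tests below substitute mocks or a ControlledClock when they need deterministic time.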
Example #1
Source File: TestJobImpl.java From big-c with Apache License 2.0 | 6 votes |
private static CommitterEventHandler createCommitterEventHandler(
    Dispatcher dispatcher, OutputCommitter committer) {
  final SystemClock clock = new SystemClock();
  AppContext appContext = mock(AppContext.class);
  when(appContext.getEventHandler()).thenReturn(
      dispatcher.getEventHandler());
  when(appContext.getClock()).thenReturn(clock);
  RMHeartbeatHandler heartbeatHandler = new RMHeartbeatHandler() {
    @Override
    public long getLastHeartbeatTime() {
      return clock.getTime();
    }
    @Override
    public void runOnNextHeartbeat(Runnable callback) {
      callback.run();
    }
  };
  ApplicationAttemptId id =
      ConverterUtils.toApplicationAttemptId("appattempt_1234567890000_0001_0");
  when(appContext.getApplicationID()).thenReturn(id.getApplicationId());
  when(appContext.getApplicationAttemptId()).thenReturn(id);
  CommitterEventHandler handler =
      new CommitterEventHandler(appContext, committer, heartbeatHandler);
  dispatcher.register(CommitterEventType.class, handler);
  return handler;
}
Example #2
Source File: LocalClient.java From tez with Apache License 2.0 | 6 votes |
@VisibleForTesting
protected DAGAppMaster createDAGAppMaster(ApplicationAttemptId applicationAttemptId,
    ContainerId cId, String currentHost, int nmPort, int nmHttpPort,
    Clock clock, long appSubmitTime, boolean isSession, String userDir,
    String[] localDirs, String[] logDirs, Credentials credentials,
    String jobUserName) throws IOException {
  // Read in additional information about external services
  AMPluginDescriptorProto amPluginDescriptorProto =
      TezUtilsInternal.readUserSpecifiedTezConfiguration(userDir)
          .getAmPluginDescriptor();
  return new DAGAppMaster(applicationAttemptId, cId, currentHost, nmPort, nmHttpPort,
      new SystemClock(), appSubmitTime, isSession, userDir, localDirs, logDirs,
      versionInfo.getVersion(), credentials, jobUserName, amPluginDescriptorProto);
}
Example #3
Source File: TestQueueManager.java From big-c with Apache License 2.0 | 6 votes |
@Before
public void setUp() throws Exception {
  conf = new FairSchedulerConfiguration();
  FairScheduler scheduler = mock(FairScheduler.class);
  AllocationConfiguration allocConf = new AllocationConfiguration(conf);
  when(scheduler.getAllocationConfiguration()).thenReturn(allocConf);
  when(scheduler.getConf()).thenReturn(conf);
  SystemClock clock = new SystemClock();
  when(scheduler.getClock()).thenReturn(clock);
  notEmptyQueues = new HashSet<FSQueue>();
  queueManager = new QueueManager(scheduler) {
    @Override
    public boolean isEmpty(FSQueue queue) {
      return !notEmptyQueues.contains(queue);
    }
  };
  FSQueueMetrics.forQueue("root", null, true, conf);
  queueManager.initialize(conf);
}
Example #4
Source File: TestQueueManager.java From hadoop with Apache License 2.0 | 6 votes |
@Before
public void setUp() throws Exception {
  conf = new FairSchedulerConfiguration();
  FairScheduler scheduler = mock(FairScheduler.class);
  AllocationConfiguration allocConf = new AllocationConfiguration(conf);
  when(scheduler.getAllocationConfiguration()).thenReturn(allocConf);
  when(scheduler.getConf()).thenReturn(conf);
  SystemClock clock = new SystemClock();
  when(scheduler.getClock()).thenReturn(clock);
  notEmptyQueues = new HashSet<FSQueue>();
  queueManager = new QueueManager(scheduler) {
    @Override
    public boolean isEmpty(FSQueue queue) {
      return !notEmptyQueues.contains(queue);
    }
  };
  FSQueueMetrics.forQueue("root", null, true, conf);
  queueManager.initialize(conf);
}
Example #5
Source File: TestJobImpl.java From hadoop with Apache License 2.0 | 6 votes |
public StubbedJob(JobId jobId, ApplicationAttemptId applicationAttemptId,
    Configuration conf, EventHandler eventHandler, boolean newApiCommitter,
    String user, int numSplits, AppContext appContext) {
  super(jobId, applicationAttemptId, conf, eventHandler,
      null, new JobTokenSecretManager(), new Credentials(),
      new SystemClock(), Collections.<TaskId, TaskInfo> emptyMap(),
      MRAppMetrics.create(), null, newApiCommitter, user,
      System.currentTimeMillis(), null, appContext, null, null);

  initTransition = getInitTransition(numSplits);
  localFactory = stateMachineFactory.addTransition(JobStateInternal.NEW,
      EnumSet.of(JobStateInternal.INITED, JobStateInternal.FAILED),
      JobEventType.JOB_INIT,
      // This is abusive.
      initTransition);

  // This "this leak" is okay because the retained pointer is in an
  // instance variable.
  localStateMachine = localFactory.make(this);
}
Example #6
Source File: TestTaskAttempt.java From incubator-tez with Apache License 2.0 | 5 votes |
@Test(timeout = 5000)
public void testLocalityRequest() {
  TaskAttemptImpl.ScheduleTaskattemptTransition sta =
      new TaskAttemptImpl.ScheduleTaskattemptTransition();

  EventHandler eventHandler = mock(EventHandler.class);
  Set<String> hosts = new TreeSet<String>();
  hosts.add("host1");
  hosts.add("host2");
  hosts.add("host3");
  TaskLocationHint locationHint = new TaskLocationHint(hosts, null);

  TezTaskID taskID = TezTaskID.getInstance(
      TezVertexID.getInstance(TezDAGID.getInstance("1", 1, 1), 1), 1);
  TaskAttemptImpl taImpl = new MockTaskAttemptImpl(taskID, 1, eventHandler,
      mock(TaskAttemptListener.class), new Configuration(), new SystemClock(),
      mock(TaskHeartbeatHandler.class), mock(AppContext.class), locationHint,
      false, Resource.newInstance(1024, 1), createFakeContainerContext(), false);

  TaskAttemptEventSchedule sEvent = mock(TaskAttemptEventSchedule.class);

  sta.transition(taImpl, sEvent);
  ArgumentCaptor<Event> arg = ArgumentCaptor.forClass(Event.class);
  verify(eventHandler, times(1)).handle(arg.capture());
  if (!(arg.getAllValues().get(0) instanceof AMSchedulerEventTALaunchRequest)) {
    fail("Second event not of type " + AMSchedulerEventTALaunchRequest.class.getName());
  }

  // TODO Move the Rack request check to the client after TEZ-125 is fixed.
  Set<String> requestedRacks = taImpl.taskRacks;
  assertEquals(1, requestedRacks.size());
  assertEquals(3, taImpl.taskHosts.size());
  for (int i = 0; i < 3; i++) {
    String host = ("host" + (i + 1));
    assertEquals(host, true, taImpl.taskHosts.contains(host));
  }
}
Example #7
Source File: StreamingContainerManager.java From attic-apex-core with Apache License 2.0 | 5 votes |
private StreamingContainerManager(CheckpointState checkpointedState, boolean enableEventRecording) {
  this.vars = checkpointedState.finals;
  this.clock = new SystemClock();
  poolExecutor = Executors.newFixedThreadPool(4);
  this.plan = checkpointedState.physicalPlan;
  this.eventBus = new MBassador<>(BusConfiguration.Default(1, 1, 1));
  this.journal = new Journal(this);
  init(enableEventRecording);
}
Example #8
Source File: HistoryFileManager.java From big-c with Apache License 2.0 | 5 votes |
@Override
protected void serviceInit(Configuration conf) throws Exception {
  this.conf = conf;

  int serialNumberLowDigits = 3;
  serialNumberFormat = ("%0"
      + (JobHistoryUtils.SERIAL_NUMBER_DIRECTORY_DIGITS + serialNumberLowDigits) + "d");

  long maxFSWaitTime = conf.getLong(
      JHAdminConfig.MR_HISTORY_MAX_START_WAIT_TIME,
      JHAdminConfig.DEFAULT_MR_HISTORY_MAX_START_WAIT_TIME);
  createHistoryDirs(new SystemClock(), 10 * 1000, maxFSWaitTime);

  this.aclsMgr = new JobACLsManager(conf);

  maxHistoryAge = conf.getLong(JHAdminConfig.MR_HISTORY_MAX_AGE_MS,
      JHAdminConfig.DEFAULT_MR_HISTORY_MAX_AGE);

  jobListCache = createJobListCache();

  serialNumberIndex = new SerialNumberIndex(conf.getInt(
      JHAdminConfig.MR_HISTORY_DATESTRING_CACHE_SIZE,
      JHAdminConfig.DEFAULT_MR_HISTORY_DATESTRING_CACHE_SIZE));

  int numMoveThreads = conf.getInt(
      JHAdminConfig.MR_HISTORY_MOVE_THREAD_COUNT,
      JHAdminConfig.DEFAULT_MR_HISTORY_MOVE_THREAD_COUNT);
  ThreadFactory tf = new ThreadFactoryBuilder().setNameFormat(
      "MoveIntermediateToDone Thread #%d").build();
  moveToDoneExecutor = new ThreadPoolExecutor(numMoveThreads, numMoveThreads,
      1, TimeUnit.HOURS, new LinkedBlockingQueue<Runnable>(), tf);

  super.serviceInit(conf);
}
Example #9
Source File: StreamingContainerManager.java From Bats with Apache License 2.0 | 5 votes |
private StreamingContainerManager(CheckpointState checkpointedState, boolean enableEventRecording) {
  this.vars = checkpointedState.finals;
  this.clock = new SystemClock();
  poolExecutor = Executors.newFixedThreadPool(4);
  this.plan = checkpointedState.physicalPlan;
  this.eventBus = new MBassador<>(BusConfiguration.Default(1, 1, 1));
  this.journal = new Journal(this);
  init(enableEventRecording);
}
Example #10
Source File: TestTaskCommunicatorManager2.java From tez with Apache License 2.0 | 5 votes |
TaskCommunicatorManagerWrapperForTest() throws IOException, TezException {
  dagId = TezDAGID.getInstance(appId, 1);
  vertexId = TezVertexID.getInstance(dagId, 100);
  doReturn(eventHandler).when(appContext).getEventHandler();
  doReturn(dag).when(appContext).getCurrentDAG();
  doReturn(vertex).when(dag).getVertex(eq(vertexId));
  doReturn(new TaskAttemptEventInfo(0, new LinkedList<TezEvent>(), 0)).when(vertex)
      .getTaskAttemptTezEvents(any(TezTaskAttemptID.class), anyInt(), anyInt(), anyInt());
  doReturn(appAttemptId).when(appContext).getApplicationAttemptId();
  doReturn(credentials).when(appContext).getAppCredentials();
  doReturn(appAcls).when(appContext).getApplicationACLs();
  doReturn(amContainerMap).when(appContext).getAllContainers();
  doReturn(new SystemClock()).when(appContext).getClock();
  NodeId nodeId = NodeId.newInstance("localhost", 0);
  AMContainer amContainer = mock(AMContainer.class);
  Container container = mock(Container.class);
  doReturn(nodeId).when(container).getNodeId();
  doReturn(amContainer).when(amContainerMap).get(any(ContainerId.class));
  doReturn(container).when(amContainer).getContainer();
  userPayload = TezUtils.createUserPayloadFromConf(conf);
  taskCommunicatorManager = new TaskCommunicatorManager(appContext,
      mock(TaskHeartbeatHandler.class), mock(ContainerHeartbeatHandler.class),
      Lists.newArrayList(new NamedEntityDescriptor(
          TezConstants.getTezYarnServicePluginName(), null).setUserPayload(userPayload)));
}
Example #11
Source File: TestRecovery.java From hadoop with Apache License 2.0 | 5 votes |
private MapTaskImpl getMockMapTask(long clusterTimestamp, EventHandler eh) {

  ApplicationId appId = ApplicationId.newInstance(clusterTimestamp, 1);
  JobId jobId = MRBuilderUtils.newJobId(appId, 1);

  int partitions = 2;

  Path remoteJobConfFile = mock(Path.class);
  JobConf conf = new JobConf();
  TaskAttemptListener taskAttemptListener = mock(TaskAttemptListener.class);
  Token<JobTokenIdentifier> jobToken =
      (Token<JobTokenIdentifier>) mock(Token.class);
  Credentials credentials = null;
  Clock clock = new SystemClock();
  int appAttemptId = 3;
  MRAppMetrics metrics = mock(MRAppMetrics.class);
  Resource minContainerRequirements = mock(Resource.class);
  when(minContainerRequirements.getMemory()).thenReturn(1000);

  ClusterInfo clusterInfo = mock(ClusterInfo.class);
  AppContext appContext = mock(AppContext.class);
  when(appContext.getClusterInfo()).thenReturn(clusterInfo);

  TaskSplitMetaInfo taskSplitMetaInfo = mock(TaskSplitMetaInfo.class);
  MapTaskImpl mapTask = new MapTaskImpl(jobId, partitions,
      eh, remoteJobConfFile, conf,
      taskSplitMetaInfo, taskAttemptListener, jobToken, credentials, clock,
      appAttemptId, metrics, appContext);
  return mapTask;
}
Example #12
Source File: TestJobImpl.java From hadoop with Apache License 2.0 | 5 votes |
@Test
public void testReportDiagnostics() throws Exception {
  JobID jobID = JobID.forName("job_1234567890000_0001");
  JobId jobId = TypeConverter.toYarn(jobID);
  final String diagMsg = "some diagnostic message";
  final JobDiagnosticsUpdateEvent diagUpdateEvent =
      new JobDiagnosticsUpdateEvent(jobId, diagMsg);
  MRAppMetrics mrAppMetrics = MRAppMetrics.create();
  AppContext mockContext = mock(AppContext.class);
  when(mockContext.hasSuccessfullyUnregistered()).thenReturn(true);
  JobImpl job = new JobImpl(jobId, Records
      .newRecord(ApplicationAttemptId.class), new Configuration(),
      mock(EventHandler.class),
      null, mock(JobTokenSecretManager.class), null,
      new SystemClock(), null,
      mrAppMetrics, null, true, null, 0, null, mockContext, null, null);
  job.handle(diagUpdateEvent);
  String diagnostics = job.getReport().getDiagnostics();
  Assert.assertNotNull(diagnostics);
  Assert.assertTrue(diagnostics.contains(diagMsg));

  job = new JobImpl(jobId, Records
      .newRecord(ApplicationAttemptId.class), new Configuration(),
      mock(EventHandler.class),
      null, mock(JobTokenSecretManager.class), null,
      new SystemClock(), null,
      mrAppMetrics, null, true, null, 0, null, mockContext, null, null);
  job.handle(new JobEvent(jobId, JobEventType.JOB_KILL));
  job.handle(diagUpdateEvent);
  diagnostics = job.getReport().getDiagnostics();
  Assert.assertNotNull(diagnostics);
  Assert.assertTrue(diagnostics.contains(diagMsg));
}
Example #13
Source File: TestTaskImpl.java From incubator-tez with Apache License 2.0 | 5 votes |
@Before
public void setup() {
  conf = new Configuration();
  taskAttemptListener = mock(TaskAttemptListener.class);
  taskHeartbeatHandler = mock(TaskHeartbeatHandler.class);
  credentials = new Credentials();
  clock = new SystemClock();
  locationHint = new TaskLocationHint(null, null);

  appId = ApplicationId.newInstance(System.currentTimeMillis(), 1);
  dagId = TezDAGID.getInstance(appId, 1);
  vertexId = TezVertexID.getInstance(dagId, 1);
  appContext = mock(AppContext.class, RETURNS_DEEP_STUBS);
  mockContainerId = mock(ContainerId.class);
  mockContainer = mock(Container.class);
  mockAMContainer = mock(AMContainer.class);
  mockNodeId = mock(NodeId.class);
  when(mockContainer.getId()).thenReturn(mockContainerId);
  when(mockContainer.getNodeId()).thenReturn(mockNodeId);
  when(mockAMContainer.getContainer()).thenReturn(mockContainer);
  when(appContext.getAllContainers().get(mockContainerId)).thenReturn(mockAMContainer);
  taskResource = Resource.newInstance(1024, 1);
  localResources = new HashMap<String, LocalResource>();
  environment = new HashMap<String, String>();
  javaOpts = "";
  leafVertex = false;
  containerContext = new ContainerContext(localResources, credentials,
      environment, javaOpts);

  Vertex vertex = mock(Vertex.class);
  eventHandler = new TestEventHandler();

  mockTask = new MockTaskImpl(vertexId, partition,
      eventHandler, conf, taskAttemptListener, clock,
      taskHeartbeatHandler, appContext, leafVertex, locationHint,
      taskResource, containerContext, vertex);
}
Example #14
Source File: TestTaskHeartbeatHandler.java From hadoop with Apache License 2.0 | 5 votes |
@SuppressWarnings({ "rawtypes", "unchecked" }) @Test public void testTimeout() throws InterruptedException { EventHandler mockHandler = mock(EventHandler.class); Clock clock = new SystemClock(); TaskHeartbeatHandler hb = new TaskHeartbeatHandler(mockHandler, clock, 1); Configuration conf = new Configuration(); conf.setInt(MRJobConfig.TASK_TIMEOUT, 10); //10 ms conf.setInt(MRJobConfig.TASK_TIMEOUT_CHECK_INTERVAL_MS, 10); //10 ms hb.init(conf); hb.start(); try { ApplicationId appId = ApplicationId.newInstance(0l, 5); JobId jobId = MRBuilderUtils.newJobId(appId, 4); TaskId tid = MRBuilderUtils.newTaskId(jobId, 3, TaskType.MAP); TaskAttemptId taid = MRBuilderUtils.newTaskAttemptId(tid, 2); hb.register(taid); Thread.sleep(100); //Events only happen when the task is canceled verify(mockHandler, times(2)).handle(any(Event.class)); } finally { hb.stop(); } }
Example #15
Source File: TestRecoveryParser.java From tez with Apache License 2.0 | 5 votes |
@Test(timeout=5000)
public void testRecoverableNonSummary2() throws IOException {
  ApplicationId appId = ApplicationId.newInstance(System.currentTimeMillis(), 1);
  TezDAGID dagID = TezDAGID.getInstance(appId, 1);
  AppContext appContext = mock(AppContext.class);
  when(appContext.getCurrentRecoveryDir()).thenReturn(new Path(recoveryPath+"/1"));
  when(appContext.getClock()).thenReturn(new SystemClock());
  when(mockDAGImpl.getID()).thenReturn(dagID);

  // MockRecoveryService will skip the non-summary event
  MockRecoveryService rService = new MockRecoveryService(appContext);
  Configuration conf = new Configuration();
  conf.setBoolean(RecoveryService.TEZ_TEST_RECOVERY_DRAIN_EVENTS_WHEN_STOPPED, true);
  rService.init(conf);
  rService.start();

  DAGPlan dagPlan = TestDAGImpl.createTestDAGPlan();
  // write a DAGSubmittedEvent first to initialize summaryStream
  rService.handle(new DAGHistoryEvent(dagID,
      new DAGSubmittedEvent(dagID, 1L, dagPlan, ApplicationAttemptId.newInstance(appId, 1),
          null, "user", new Configuration(), null, null)));
  // It should be fine to skip other events, just for testing.
  TezVertexID vertexId = TezVertexID.getInstance(dagID, 0);
  rService.handle(new DAGHistoryEvent(dagID,
      new VertexGroupCommitStartedEvent(dagID, "group_1",
          Lists.newArrayList(TezVertexID.getInstance(dagID, 0),
              TezVertexID.getInstance(dagID, 1)), 0L)));
  rService.handle(new DAGHistoryEvent(dagID,
      new VertexGroupCommitFinishedEvent(dagID, "group_1",
          Lists.newArrayList(TezVertexID.getInstance(dagID, 0),
              TezVertexID.getInstance(dagID, 1)), 0L)));
  rService.stop();

  DAGRecoveryData dagData = parser.parseRecoveryData();
  assertTrue(dagData.nonRecoverable);
  assertTrue(dagData.reason.contains("Vertex has been committed as member of vertex group"
      + ", but its full recovery events are not seen"));
}
Example #16
Source File: TestRecovery.java From big-c with Apache License 2.0 | 5 votes |
private MapTaskImpl getMockMapTask(long clusterTimestamp, EventHandler eh) {

  ApplicationId appId = ApplicationId.newInstance(clusterTimestamp, 1);
  JobId jobId = MRBuilderUtils.newJobId(appId, 1);

  int partitions = 2;

  Path remoteJobConfFile = mock(Path.class);
  JobConf conf = new JobConf();
  TaskAttemptListener taskAttemptListener = mock(TaskAttemptListener.class);
  Token<JobTokenIdentifier> jobToken =
      (Token<JobTokenIdentifier>) mock(Token.class);
  Credentials credentials = null;
  Clock clock = new SystemClock();
  int appAttemptId = 3;
  MRAppMetrics metrics = mock(MRAppMetrics.class);
  Resource minContainerRequirements = mock(Resource.class);
  when(minContainerRequirements.getMemory()).thenReturn(1000);

  ClusterInfo clusterInfo = mock(ClusterInfo.class);
  AppContext appContext = mock(AppContext.class);
  when(appContext.getClusterInfo()).thenReturn(clusterInfo);

  TaskSplitMetaInfo taskSplitMetaInfo = mock(TaskSplitMetaInfo.class);
  MapTaskImpl mapTask = new MapTaskImpl(jobId, partitions,
      eh, remoteJobConfFile, conf,
      taskSplitMetaInfo, taskAttemptListener, jobToken, credentials, clock,
      appAttemptId, metrics, appContext);
  return mapTask;
}
Example #17
Source File: TestRecoveryParser.java From tez with Apache License 2.0 | 5 votes |
@Test(timeout=5000)
public void testRecoverableSummary_DAGFinishCommitting() throws IOException {
  ApplicationId appId = ApplicationId.newInstance(System.currentTimeMillis(), 1);
  ApplicationAttemptId appAttemptId = ApplicationAttemptId.newInstance(appId, 1);
  TezDAGID dagID = TezDAGID.getInstance(appId, 1);
  AppContext appContext = mock(AppContext.class);
  when(appContext.getCurrentRecoveryDir()).thenReturn(new Path(recoveryPath+"/1"));
  when(appContext.getClock()).thenReturn(new SystemClock());
  when(mockDAGImpl.getID()).thenReturn(dagID);

  RecoveryService rService = new RecoveryService(appContext);
  Configuration conf = new Configuration();
  conf.setBoolean(RecoveryService.TEZ_TEST_RECOVERY_DRAIN_EVENTS_WHEN_STOPPED, true);
  rService.init(conf);
  rService.start();

  DAGPlan dagPlan = TestDAGImpl.createTestDAGPlan();
  // write a DAGSubmittedEvent first to initialize summaryStream
  rService.handle(new DAGHistoryEvent(dagID,
      new DAGSubmittedEvent(dagID, 1L, dagPlan, ApplicationAttemptId.newInstance(appId, 1),
          null, "user", new Configuration(), null, null)));
  // It should be fine to skip other events, just for testing.
  rService.handle(new DAGHistoryEvent(dagID, new DAGCommitStartedEvent(dagID, 0L)));
  rService.handle(new DAGHistoryEvent(dagID, new DAGFinishedEvent(dagID, 1L, 2L,
      DAGState.FAILED, "diag", null, "user", "dag1", null, appAttemptId, dagPlan)));
  rService.stop();

  DAGRecoveryData dagData = parser.parseRecoveryData();
  assertEquals(dagID, dagData.recoveredDagID);
  assertEquals(DAGState.FAILED, dagData.dagState);
  assertFalse(dagData.nonRecoverable);
  assertNull(dagData.reason);
  assertTrue(dagData.isCompleted);
}
Example #18
Source File: TestTaskAttempt.java From tez with Apache License 2.0 | 4 votes |
@Test(timeout = 5000)
public void testSuccess() throws Exception {
  ApplicationId appId = ApplicationId.newInstance(1, 2);
  ApplicationAttemptId appAttemptId = ApplicationAttemptId.newInstance(appId, 0);
  TezDAGID dagID = TezDAGID.getInstance(appId, 1);
  TezVertexID vertexID = TezVertexID.getInstance(dagID, 1);
  TezTaskID taskID = TezTaskID.getInstance(vertexID, 1);

  MockEventHandler eventHandler = spy(new MockEventHandler());
  TaskCommunicatorManagerInterface taListener = createMockTaskAttemptListener();

  Configuration taskConf = new Configuration();
  taskConf.setClass("fs.file.impl", StubbedFS.class, FileSystem.class);
  taskConf.setBoolean("fs.file.impl.disable.cache", true);
  taskConf.setBoolean(TezConfiguration.TEZ_AM_SPECULATION_ENABLED, true);

  locationHint = TaskLocationHint.createTaskLocationHint(
      new HashSet<String>(Arrays.asList(new String[]{"127.0.0.1"})), null);
  Resource resource = Resource.newInstance(1024, 1);

  NodeId nid = NodeId.newInstance("127.0.0.1", 0);
  @SuppressWarnings("deprecation")
  ContainerId contId = ContainerId.newInstance(appAttemptId, 3);
  Container container = mock(Container.class);
  when(container.getId()).thenReturn(contId);
  when(container.getNodeId()).thenReturn(nid);
  when(container.getNodeHttpAddress()).thenReturn("localhost:0");

  AMContainerMap containers = new AMContainerMap(
      mock(ContainerHeartbeatHandler.class), mock(TaskCommunicatorManagerInterface.class),
      new ContainerContextMatcher(), appCtx);
  containers.addContainerIfNew(container, 0, 0, 0);

  doReturn(new ClusterInfo()).when(appCtx).getClusterInfo();
  doReturn(containers).when(appCtx).getAllContainers();

  TaskHeartbeatHandler mockHeartbeatHandler = mock(TaskHeartbeatHandler.class);
  TaskAttemptImpl taImpl = new MockTaskAttemptImpl(taskID, 1, eventHandler,
      taListener, taskConf, new SystemClock(),
      mockHeartbeatHandler, appCtx, false,
      resource, createFakeContainerContext(), false);
  TezTaskAttemptID taskAttemptID = taImpl.getID();
  ArgumentCaptor<Event> arg = ArgumentCaptor.forClass(Event.class);

  taImpl.handle(new TaskAttemptEventSchedule(taskAttemptID, 0, 0));
  taImpl.handle(new TaskAttemptEventSubmitted(taskAttemptID, contId));
  taImpl.handle(new TaskAttemptEventStartedRemotely(taskAttemptID));
  assertEquals("Task attempt is not in the RUNNING state", taImpl.getState(),
      TaskAttemptState.RUNNING);
  verify(mockHeartbeatHandler).register(taskAttemptID);

  int expectedEventsAtRunning = 6;
  verify(eventHandler, times(expectedEventsAtRunning)).handle(arg.capture());
  verifyEventType(
      arg.getAllValues().subList(0, expectedEventsAtRunning),
      SpeculatorEventTaskAttemptStatusUpdate.class, 1);

  taImpl.handle(new TaskAttemptEventStatusUpdate(taskAttemptID,
      new TaskStatusUpdateEvent(null, 0.1f, null, false)));

  taImpl.handle(new TaskAttemptEvent(taskAttemptID, TaskAttemptEventType.TA_DONE));

  assertEquals("Task attempt is not in the SUCCEEDED state", taImpl.getState(),
      TaskAttemptState.SUCCEEDED);
  verify(mockHeartbeatHandler).unregister(taskAttemptID);
  assertEquals(0, taImpl.getDiagnostics().size());

  int expectedEvenstAfterTerminating = expectedEventsAtRunning + 5;
  arg = ArgumentCaptor.forClass(Event.class);
  verify(eventHandler, times(expectedEvenstAfterTerminating)).handle(arg.capture());

  Event e = verifyEventType(
      arg.getAllValues().subList(expectedEventsAtRunning,
          expectedEvenstAfterTerminating), TaskEventTASucceeded.class, 1);
  assertEquals(TaskEventType.T_ATTEMPT_SUCCEEDED, e.getType());
  verifyEventType(
      arg.getAllValues().subList(expectedEventsAtRunning,
          expectedEvenstAfterTerminating), AMSchedulerEventTAEnded.class, 1);
  verifyEventType(
      arg.getAllValues().subList(expectedEventsAtRunning,
          expectedEvenstAfterTerminating), DAGEventCounterUpdate.class, 1);
  verifyEventType(
      arg.getAllValues().subList(expectedEventsAtRunning,
          expectedEvenstAfterTerminating), SpeculatorEventTaskAttemptStatusUpdate.class, 2);
}
Example #19
Source File: ContainerAllocationExpirer.java From big-c with Apache License 2.0 | 4 votes |
public ContainerAllocationExpirer(Dispatcher d) {
  super(ContainerAllocationExpirer.class.getName(), new SystemClock());
  this.dispatcher = d.getEventHandler();
}
Example #20
Source File: TestTaskAttempt.java From incubator-tez with Apache License 2.0 | 4 votes |
@Test(timeout = 5000)
// Ensure node failure on Successful Non-Leaf tasks cause them to be marked as KILLED
public void testNodeFailedNonLeafVertex() throws Exception {
  ApplicationId appId = ApplicationId.newInstance(1, 2);
  ApplicationAttemptId appAttemptId = ApplicationAttemptId.newInstance(appId, 0);
  TezDAGID dagID = TezDAGID.getInstance(appId, 1);
  TezVertexID vertexID = TezVertexID.getInstance(dagID, 1);
  TezTaskID taskID = TezTaskID.getInstance(vertexID, 1);
  TezTaskAttemptID taskAttemptID = TezTaskAttemptID.getInstance(taskID, 0);

  MockEventHandler eventHandler = spy(new MockEventHandler());
  TaskAttemptListener taListener = mock(TaskAttemptListener.class);
  when(taListener.getAddress()).thenReturn(
      new InetSocketAddress("localhost", 0));

  Configuration taskConf = new Configuration();
  taskConf.setClass("fs.file.impl", StubbedFS.class, FileSystem.class);
  taskConf.setBoolean("fs.file.impl.disable.cache", true);

  TaskLocationHint locationHint = new TaskLocationHint(
      new HashSet<String>(Arrays.asList(new String[] {"127.0.0.1"})), null);
  Resource resource = Resource.newInstance(1024, 1);

  NodeId nid = NodeId.newInstance("127.0.0.1", 0);
  ContainerId contId = ContainerId.newInstance(appAttemptId, 3);
  Container container = mock(Container.class);
  when(container.getId()).thenReturn(contId);
  when(container.getNodeId()).thenReturn(nid);
  when(container.getNodeHttpAddress()).thenReturn("localhost:0");

  AppContext appCtx = mock(AppContext.class);
  AMContainerMap containers = new AMContainerMap(
      mock(ContainerHeartbeatHandler.class), mock(TaskAttemptListener.class),
      new ContainerContextMatcher(), appCtx);
  containers.addContainerIfNew(container);

  doReturn(new ClusterInfo()).when(appCtx).getClusterInfo();
  doReturn(containers).when(appCtx).getAllContainers();

  MockTaskAttemptImpl taImpl = new MockTaskAttemptImpl(taskID, 1, eventHandler,
      taListener, taskConf, new SystemClock(),
      mock(TaskHeartbeatHandler.class), appCtx, locationHint, false,
      resource, createFakeContainerContext(), false);

  ArgumentCaptor<Event> arg = ArgumentCaptor.forClass(Event.class);

  taImpl.handle(new TaskAttemptEventSchedule(taskAttemptID, null));
  // At state STARTING.
  taImpl.handle(new TaskAttemptEventStartedRemotely(taskAttemptID, contId, null));
  assertEquals("Task attempt is not in the RUNNING state",
      TaskAttemptState.RUNNING, taImpl.getState());

  int expectedEventsAtRunning = 3;
  verify(eventHandler, times(expectedEventsAtRunning)).handle(arg.capture());

  taImpl.handle(new TaskAttemptEvent(taskAttemptID, TaskAttemptEventType.TA_DONE));

  assertEquals("Task attempt is not in the SUCCEEDED state",
      TaskAttemptState.SUCCEEDED, taImpl.getState());
  assertEquals(0, taImpl.getDiagnostics().size());

  int expectedEvenstAfterTerminating = expectedEventsAtRunning + 3;
  arg = ArgumentCaptor.forClass(Event.class);
  verify(eventHandler, times(expectedEvenstAfterTerminating)).handle(arg.capture());

  verifyEventType(
      arg.getAllValues().subList(expectedEventsAtRunning,
          expectedEvenstAfterTerminating), TaskEventTAUpdate.class, 1);
  verifyEventType(
      arg.getAllValues().subList(expectedEventsAtRunning,
          expectedEvenstAfterTerminating), AMSchedulerEventTAEnded.class, 1);

  // Send out a Node Failure.
  taImpl.handle(new TaskAttemptEventNodeFailed(taskAttemptID, "NodeDecomissioned"));
  // Verify in KILLED state
  assertEquals("Task attempt is not in the KILLED state",
      TaskAttemptState.KILLED, taImpl.getState());
  assertEquals(true, taImpl.inputFailedReported);
  // Verify one event to the Task informing it about FAILURE. No events to scheduler. Counter event.
  int expectedEventsNodeFailure = expectedEvenstAfterTerminating + 2;
  arg = ArgumentCaptor.forClass(Event.class);
  verify(eventHandler, times(expectedEventsNodeFailure)).handle(arg.capture());
  verifyEventType(
      arg.getAllValues().subList(expectedEvenstAfterTerminating,
          expectedEventsNodeFailure), TaskEventTAUpdate.class, 1);

  // Verify still in KILLED state
  assertEquals("Task attempt is not in the KILLED state",
      TaskAttemptState.KILLED, taImpl.getState());
}
Example #21
Source File: TestTaskAttempt.java From tez with Apache License 2.0 | 4 votes |
@SuppressWarnings("deprecation") @Test(timeout = 5000) public void testKilledInNew() throws ServicePluginException { ApplicationId appId = ApplicationId.newInstance(1, 2); ApplicationAttemptId appAttemptId = ApplicationAttemptId.newInstance( appId, 0); TezDAGID dagID = TezDAGID.getInstance(appId, 1); TezVertexID vertexID = TezVertexID.getInstance(dagID, 1); TezTaskID taskID = TezTaskID.getInstance(vertexID, 1); MockEventHandler eventHandler = spy(new MockEventHandler()); TaskCommunicatorManagerInterface taListener = createMockTaskAttemptListener(); Configuration taskConf = new Configuration(); taskConf.setClass("fs.file.impl", StubbedFS.class, FileSystem.class); taskConf.setBoolean("fs.file.impl.disable.cache", true); locationHint = TaskLocationHint.createTaskLocationHint( new HashSet<String>(Arrays.asList(new String[]{"127.0.0.1"})), null); Resource resource = Resource.newInstance(1024, 1); NodeId nid = NodeId.newInstance("127.0.0.1", 0); ContainerId contId = ContainerId.newInstance(appAttemptId, 3); Container container = mock(Container.class); when(container.getId()).thenReturn(contId); when(container.getNodeId()).thenReturn(nid); when(container.getNodeHttpAddress()).thenReturn("localhost:0"); AMContainerMap containers = new AMContainerMap( mock(ContainerHeartbeatHandler.class), mock(TaskCommunicatorManagerInterface.class), new ContainerContextMatcher(), appCtx); containers.addContainerIfNew(container, 0, 0, 0); doReturn(new ClusterInfo()).when(appCtx).getClusterInfo(); doReturn(containers).when(appCtx).getAllContainers(); TaskHeartbeatHandler mockHeartbeatHandler = mock(TaskHeartbeatHandler.class); MockTaskAttemptImpl taImpl = new MockTaskAttemptImpl(taskID, 1, eventHandler, taListener, taskConf, new SystemClock(), mockHeartbeatHandler, appCtx, false, resource, createFakeContainerContext(), true); Assert.assertEquals(TaskAttemptStateInternal.NEW, taImpl.getInternalState()); taImpl.handle(new TaskAttemptEventKillRequest(taImpl.getID(), "kill it", TaskAttemptTerminationCause.TERMINATED_BY_CLIENT)); Assert.assertEquals(TaskAttemptStateInternal.KILLED, taImpl.getInternalState()); Assert.assertEquals(0, taImpl.taskAttemptStartedEventLogged); Assert.assertEquals(1, taImpl.taskAttemptFinishedEventLogged); }
Example #22
Source File: RMAppImpl.java From big-c with Apache License 2.0 | 4 votes |
public RMAppImpl(ApplicationId applicationId, RMContext rmContext,
    Configuration config, String name, String user, String queue,
    ApplicationSubmissionContext submissionContext, YarnScheduler scheduler,
    ApplicationMasterService masterService, long submitTime,
    String applicationType, Set<String> applicationTags,
    ResourceRequest amReq) {

  this.systemClock = new SystemClock();

  this.applicationId = applicationId;
  this.name = name;
  this.rmContext = rmContext;
  this.dispatcher = rmContext.getDispatcher();
  this.handler = dispatcher.getEventHandler();
  this.conf = config;
  this.user = user;
  this.queue = queue;
  this.submissionContext = submissionContext;
  this.scheduler = scheduler;
  this.masterService = masterService;
  this.submitTime = submitTime;
  this.startTime = this.systemClock.getTime();
  this.applicationType = applicationType;
  this.applicationTags = applicationTags;
  this.amReq = amReq;

  int globalMaxAppAttempts = conf.getInt(YarnConfiguration.RM_AM_MAX_ATTEMPTS,
      YarnConfiguration.DEFAULT_RM_AM_MAX_ATTEMPTS);
  int individualMaxAppAttempts = submissionContext.getMaxAppAttempts();
  if (individualMaxAppAttempts <= 0 ||
      individualMaxAppAttempts > globalMaxAppAttempts) {
    this.maxAppAttempts = globalMaxAppAttempts;
    LOG.warn("The specific max attempts: " + individualMaxAppAttempts
        + " for application: " + applicationId.getId()
        + " is invalid, because it is out of the range [1, "
        + globalMaxAppAttempts + "]. Use the global max attempts instead.");
  } else {
    this.maxAppAttempts = individualMaxAppAttempts;
  }

  this.attemptFailuresValidityInterval =
      submissionContext.getAttemptFailuresValidityInterval();
  if (this.attemptFailuresValidityInterval > 0) {
    LOG.info("The attemptFailuresValidityInterval for the application: "
        + this.applicationId + " is " + this.attemptFailuresValidityInterval + ".");
  }

  ReentrantReadWriteLock lock = new ReentrantReadWriteLock();
  this.readLock = lock.readLock();
  this.writeLock = lock.writeLock();

  this.stateMachine = stateMachineFactory.make(this);

  rmContext.getRMApplicationHistoryWriter().applicationStarted(this);
  rmContext.getSystemMetricsPublisher().appCreated(this, startTime);
}
Example #23
Source File: MRApp.java From big-c with Apache License 2.0 | 4 votes |
public MRApp(int maps, int reduces, boolean autoComplete, String testName,
    boolean cleanOnStart, int startCount, boolean unregistered) {
  this(maps, reduces, autoComplete, testName, cleanOnStart, startCount,
      new SystemClock(), unregistered);
}
Example #24
Source File: ProportionalCapacityPreemptionPolicy.java From big-c with Apache License 2.0 | 4 votes |
public ProportionalCapacityPreemptionPolicy(Configuration config,
    EventHandler<ContainerPreemptEvent> dispatcher,
    CapacityScheduler scheduler) {
  this(config, dispatcher, scheduler, new SystemClock());
}
Example #25
Source File: DelayOperatorTest.java From attic-apex-core with Apache License 2.0 | 4 votes |
@Test
public void testCheckpointUpdate() {
  LogicalPlan dag = StramTestSupport.createDAG(testMeta);

  TestGeneratorInputOperator opA = dag.addOperator("A", TestGeneratorInputOperator.class);
  GenericTestOperator opB = dag.addOperator("B", GenericTestOperator.class);
  GenericTestOperator opC = dag.addOperator("C", GenericTestOperator.class);
  GenericTestOperator opD = dag.addOperator("D", GenericTestOperator.class);
  DefaultDelayOperator<Object> opDelay = dag.addOperator("opDelay", new DefaultDelayOperator<>());

  dag.addStream("AtoB", opA.outport, opB.inport1);
  dag.addStream("BtoC", opB.outport1, opC.inport1);
  dag.addStream("CtoD", opC.outport1, opD.inport1);
  dag.addStream("CtoDelay", opC.outport2, opDelay.input);
  dag.addStream("DelayToB", opDelay.output, opB.inport2);
  dag.validate();
  dag.setAttribute(com.datatorrent.api.Context.OperatorContext.STORAGE_AGENT, new MemoryStorageAgent());

  StreamingContainerManager scm = new StreamingContainerManager(dag);
  PhysicalPlan plan = scm.getPhysicalPlan();
  // set all operators as active to enable recovery window id update
  for (PTOperator oper : plan.getAllOperators().values()) {
    oper.setState(PTOperator.State.ACTIVE);
  }

  Clock clock = new SystemClock();

  PTOperator opA1 = plan.getOperators(dag.getMeta(opA)).get(0);
  PTOperator opB1 = plan.getOperators(dag.getMeta(opB)).get(0);
  PTOperator opC1 = plan.getOperators(dag.getMeta(opC)).get(0);
  PTOperator opDelay1 = plan.getOperators(dag.getMeta(opDelay)).get(0);
  PTOperator opD1 = plan.getOperators(dag.getMeta(opD)).get(0);

  Checkpoint cp3 = new Checkpoint(3L, 0, 0);
  Checkpoint cp5 = new Checkpoint(5L, 0, 0);
  Checkpoint cp4 = new Checkpoint(4L, 0, 0);

  opB1.checkpoints.add(cp3);
  opC1.checkpoints.add(cp3);
  opC1.checkpoints.add(cp4);
  opDelay1.checkpoints.add(cp3);
  opDelay1.checkpoints.add(cp5);
  opD1.checkpoints.add(cp5);

  // construct grouping that would be supplied through LogicalPlan
  Set<OperatorMeta> stronglyConnected = Sets.newHashSet(dag.getMeta(opB), dag.getMeta(opC), dag.getMeta(opDelay));
  Map<OperatorMeta, Set<OperatorMeta>> groups = new HashMap<>();
  for (OperatorMeta om : stronglyConnected) {
    groups.put(om, stronglyConnected);
  }

  UpdateCheckpointsContext ctx = new UpdateCheckpointsContext(clock, false, groups);
  scm.updateRecoveryCheckpoints(opB1, ctx, false);

  Assert.assertEquals("checkpoint " + opA1, Checkpoint.INITIAL_CHECKPOINT, opA1.getRecoveryCheckpoint());
  Assert.assertEquals("checkpoint " + opB1, cp3, opC1.getRecoveryCheckpoint());
  Assert.assertEquals("checkpoint " + opC1, cp3, opC1.getRecoveryCheckpoint());
  Assert.assertEquals("checkpoint " + opD1, cp5, opD1.getRecoveryCheckpoint());
}
Example #26
Source File: TestTaskAttempt.java From incubator-tez with Apache License 2.0 | 4 votes |
@Test(timeout = 5000)
// Ensure ContainerTerminating and ContainerTerminated is handled correctly by
// the TaskAttempt
public void testContainerTerminatedAfterSuccess() throws Exception {
  ApplicationId appId = ApplicationId.newInstance(1, 2);
  ApplicationAttemptId appAttemptId = ApplicationAttemptId.newInstance(appId, 0);
  TezDAGID dagID = TezDAGID.getInstance(appId, 1);
  TezVertexID vertexID = TezVertexID.getInstance(dagID, 1);
  TezTaskID taskID = TezTaskID.getInstance(vertexID, 1);
  TezTaskAttemptID taskAttemptID = TezTaskAttemptID.getInstance(taskID, 0);

  MockEventHandler eventHandler = spy(new MockEventHandler());
  TaskAttemptListener taListener = mock(TaskAttemptListener.class);
  when(taListener.getAddress()).thenReturn(
      new InetSocketAddress("localhost", 0));

  Configuration taskConf = new Configuration();
  taskConf.setClass("fs.file.impl", StubbedFS.class, FileSystem.class);
  taskConf.setBoolean("fs.file.impl.disable.cache", true);

  TaskLocationHint locationHint = new TaskLocationHint(
      new HashSet<String>(Arrays.asList(new String[] {"127.0.0.1"})), null);
  Resource resource = Resource.newInstance(1024, 1);

  NodeId nid = NodeId.newInstance("127.0.0.1", 0);
  ContainerId contId = ContainerId.newInstance(appAttemptId, 3);
  Container container = mock(Container.class);
  when(container.getId()).thenReturn(contId);
  when(container.getNodeId()).thenReturn(nid);
  when(container.getNodeHttpAddress()).thenReturn("localhost:0");

  AppContext appCtx = mock(AppContext.class);
  AMContainerMap containers = new AMContainerMap(
      mock(ContainerHeartbeatHandler.class), mock(TaskAttemptListener.class),
      new ContainerContextMatcher(), appCtx);
  containers.addContainerIfNew(container);

  doReturn(new ClusterInfo()).when(appCtx).getClusterInfo();
  doReturn(containers).when(appCtx).getAllContainers();

  TaskAttemptImpl taImpl = new MockTaskAttemptImpl(taskID, 1, eventHandler,
      taListener, taskConf, new SystemClock(),
      mock(TaskHeartbeatHandler.class), appCtx, locationHint, false,
      resource, createFakeContainerContext(), false);

  ArgumentCaptor<Event> arg = ArgumentCaptor.forClass(Event.class);

  taImpl.handle(new TaskAttemptEventSchedule(taskAttemptID, null));
  // At state STARTING.
  taImpl.handle(new TaskAttemptEventStartedRemotely(taskAttemptID, contId, null));
  assertEquals("Task attempt is not in the RUNNING state", taImpl.getState(),
      TaskAttemptState.RUNNING);

  int expectedEventsAtRunning = 3;
  verify(eventHandler, times(expectedEventsAtRunning)).handle(arg.capture());

  taImpl.handle(new TaskAttemptEvent(taskAttemptID, TaskAttemptEventType.TA_DONE));

  assertEquals("Task attempt is not in the SUCCEEDED state", taImpl.getState(),
      TaskAttemptState.SUCCEEDED);
  assertEquals(0, taImpl.getDiagnostics().size());

  int expectedEvenstAfterTerminating = expectedEventsAtRunning + 3;
  arg = ArgumentCaptor.forClass(Event.class);
  verify(eventHandler, times(expectedEvenstAfterTerminating)).handle(arg.capture());

  verifyEventType(
      arg.getAllValues().subList(expectedEventsAtRunning,
          expectedEvenstAfterTerminating), TaskEventTAUpdate.class, 1);
  verifyEventType(
      arg.getAllValues().subList(expectedEventsAtRunning,
          expectedEvenstAfterTerminating), AMSchedulerEventTAEnded.class, 1);

  taImpl.handle(new TaskAttemptEventContainerTerminated(taskAttemptID, "Terminated"));
  int expectedEventAfterTerminated = expectedEvenstAfterTerminating + 0;
  arg = ArgumentCaptor.forClass(Event.class);
  verify(eventHandler, times(expectedEventAfterTerminated)).handle(arg.capture());

  // Verify that the diagnostic message included in the Terminated event is not
  // captured - TA already succeeded.
  assertEquals(0, taImpl.getDiagnostics().size());
}
Example #27
Source File: TestWorkPreservingRMRestart.java From big-c with Apache License 2.0 | 4 votes |
@Test (timeout = 20000)
public void testNewContainersNotAllocatedDuringSchedulerRecovery()
    throws Exception {
  conf.setLong(
      YarnConfiguration.RM_WORK_PRESERVING_RECOVERY_SCHEDULING_WAIT_MS, 4000);
  MemoryRMStateStore memStore = new MemoryRMStateStore();
  memStore.init(conf);
  rm1 = new MockRM(conf, memStore);
  rm1.start();
  MockNM nm1 =
      new MockNM("127.0.0.1:1234", 8192, rm1.getResourceTrackerService());
  nm1.registerNode();
  RMApp app1 = rm1.submitApp(200);
  MockAM am1 = MockRM.launchAndRegisterAM(app1, rm1, nm1);

  // Restart RM
  rm2 = new MockRM(conf, memStore);
  rm2.start();
  nm1.setResourceTrackerService(rm2.getResourceTrackerService());
  nm1.registerNode();
  ControlledClock clock = new ControlledClock(new SystemClock());
  long startTime = System.currentTimeMillis();
  ((RMContextImpl)rm2.getRMContext()).setSystemClock(clock);
  am1.setAMRMProtocol(rm2.getApplicationMasterService(), rm2.getRMContext());
  am1.registerAppAttempt(true);
  rm2.waitForState(app1.getApplicationId(), RMAppState.RUNNING);

  // AM request for new containers
  am1.allocate("127.0.0.1", 1000, 1, new ArrayList<ContainerId>());

  List<Container> containers = new ArrayList<Container>();
  clock.setTime(startTime + 2000);
  nm1.nodeHeartbeat(true);

  // sleep some time as allocation happens asynchronously.
  Thread.sleep(3000);
  containers.addAll(am1.allocate(new ArrayList<ResourceRequest>(),
      new ArrayList<ContainerId>()).getAllocatedContainers());
  // container is not allocated during scheduling recovery.
  Assert.assertTrue(containers.isEmpty());

  clock.setTime(startTime + 8000);
  nm1.nodeHeartbeat(true);
  // Container is created after recovery is done.
  while (containers.isEmpty()) {
    containers.addAll(am1.allocate(new ArrayList<ResourceRequest>(),
        new ArrayList<ContainerId>()).getAllocatedContainers());
    Thread.sleep(500);
  }
}
Example #28
Source File: MRAppMaster.java From big-c with Apache License 2.0 | 4 votes |
public MRAppMaster(ApplicationAttemptId applicationAttemptId,
    ContainerId containerId, String nmHost, int nmPort, int nmHttpPort,
    long appSubmitTime) {
  this(applicationAttemptId, containerId, nmHost, nmPort, nmHttpPort,
      new SystemClock(), appSubmitTime);
}
Example #29
Source File: SessionLivelinessMonitor.java From tajo with Apache License 2.0 | 4 votes |
public SessionLivelinessMonitor(Dispatcher d) {
  super(SessionLivelinessMonitor.class.getSimpleName(), new SystemClock());
  this.dispatcher = d.getEventHandler();
}
Example #30
Source File: MRApp.java From big-c with Apache License 2.0 | 4 votes |
public MRApp(int maps, int reduces, boolean autoComplete, String testName,
    boolean cleanOnStart, int startCount) {
  this(maps, reduces, autoComplete, testName, cleanOnStart, startCount,
      new SystemClock(), null);
}