org.apache.hadoop.yarn.api.records.NodeState Java Examples
The following examples show how to use org.apache.hadoop.yarn.api.records.NodeState.
The originating project, source file, and license are noted above each example.
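NodeState is the enum the ResourceManager uses to describe a node's lifecycle state (NEW, RUNNING, UNHEALTHY, DECOMMISSIONED, LOST, REBOOTED). In the examples below it is either asserted against in tests or passed to getNodeReports(...) to filter node reports. As a minimal sketch of the latter pattern, assuming a yarn-site.xml on the classpath that points at a running ResourceManager (the class name here is illustrative and not taken from any of the projects below):

import java.util.List;

import org.apache.hadoop.yarn.api.records.NodeReport;
import org.apache.hadoop.yarn.api.records.NodeState;
import org.apache.hadoop.yarn.client.api.YarnClient;
import org.apache.hadoop.yarn.conf.YarnConfiguration;

public class ListRunningNodes {
  public static void main(String[] args) throws Exception {
    // Connects to the ResourceManager configured in yarn-site.xml on the classpath.
    YarnClient yarnClient = YarnClient.createYarnClient();
    yarnClient.init(new YarnConfiguration());
    yarnClient.start();
    try {
      // Ask only for nodes currently in the RUNNING state.
      List<NodeReport> running = yarnClient.getNodeReports(NodeState.RUNNING);
      for (NodeReport report : running) {
        System.out.println(report.getNodeId() + "\t" + report.getNodeState()
            + "\t" + report.getCapability());
      }
    } finally {
      yarnClient.stop();
    }
  }
}

Filtering on NodeState.RUNNING server-side avoids pulling reports for lost or decommissioned nodes, which is what the Flink and Jumbune examples below rely on when summing free cluster resources.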
Example #1
Source File: TestRMWebServicesNodes.java From hadoop with Apache License 2.0

@Test
public void testNodesQueryRunning() throws JSONException, Exception {
  WebResource r = resource();
  MockNM nm1 = rm.registerNode("h1:1234", 5120);
  MockNM nm2 = rm.registerNode("h2:1235", 5121);
  rm.sendNodeStarted(nm1);
  rm.NMwaitForState(nm1.getNodeId(), NodeState.RUNNING);
  rm.NMwaitForState(nm2.getNodeId(), NodeState.NEW);
  ClientResponse response = r.path("ws").path("v1").path("cluster")
      .path("nodes").queryParam("states", "running")
      .accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
  assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
  JSONObject json = response.getEntity(JSONObject.class);
  assertEquals("incorrect number of elements", 1, json.length());
  JSONObject nodes = json.getJSONObject("nodes");
  assertEquals("incorrect number of elements", 1, nodes.length());
  JSONArray nodeArray = nodes.getJSONArray("node");
  assertEquals("incorrect number of elements", 1, nodeArray.length());
}
Example #2
Source File: TestRMNodeTransitions.java From big-c with Apache License 2.0

@Test
public void testResourceUpdateOnRunningNode() {
  RMNodeImpl node = getRunningNode();
  Resource oldCapacity = node.getTotalCapability();
  assertEquals("Memory resource is not match.", oldCapacity.getMemory(), 4096);
  assertEquals("CPU resource is not match.", oldCapacity.getVirtualCores(), 4);
  node.handle(new RMNodeResourceUpdateEvent(node.getNodeID(),
      ResourceOption.newInstance(Resource.newInstance(2048, 2),
          RMNode.OVER_COMMIT_TIMEOUT_MILLIS_DEFAULT)));
  Resource newCapacity = node.getTotalCapability();
  assertEquals("Memory resource is not match.", newCapacity.getMemory(), 2048);
  assertEquals("CPU resource is not match.", newCapacity.getVirtualCores(), 2);

  Assert.assertEquals(NodeState.RUNNING, node.getState());
  Assert.assertNotNull(nodesListManagerEvent);
  Assert.assertEquals(NodesListManagerEventType.NODE_USABLE,
      nodesListManagerEvent.getType());
}
Example #3
Source File: NodeCLI.java From hadoop with Apache License 2.0

/**
 * Lists the nodes matching the given node states
 *
 * @param nodeStates
 * @throws YarnException
 * @throws IOException
 */
private void listClusterNodes(Set<NodeState> nodeStates)
    throws YarnException, IOException {
  PrintWriter writer = new PrintWriter(
      new OutputStreamWriter(sysout, Charset.forName("UTF-8")));
  List<NodeReport> nodesReport = client.getNodeReports(
      nodeStates.toArray(new NodeState[0]));
  writer.println("Total Nodes:" + nodesReport.size());
  writer.printf(NODES_PATTERN, "Node-Id", "Node-State", "Node-Http-Address",
      "Number-of-Running-Containers");
  for (NodeReport nodeReport : nodesReport) {
    writer.printf(NODES_PATTERN, nodeReport.getNodeId(), nodeReport
        .getNodeState(), nodeReport.getHttpAddress(), nodeReport
        .getNumContainers());
  }
  writer.flush();
}
Example #4
Source File: TestRMNodeTransitions.java From hadoop with Apache License 2.0

@Test
public void testResourceUpdateOnNewNode() {
  RMNodeImpl node = getNewNode(Resource.newInstance(4096, 4, 4));
  Resource oldCapacity = node.getTotalCapability();
  assertEquals("Memory resource is not match.", oldCapacity.getMemory(), 4096);
  assertEquals("CPU resource is not match.", oldCapacity.getVirtualCores(), 4);
  node.handle(new RMNodeResourceUpdateEvent(node.getNodeID(),
      ResourceOption.newInstance(Resource.newInstance(2048, 2, 2),
          RMNode.OVER_COMMIT_TIMEOUT_MILLIS_DEFAULT)));
  Resource newCapacity = node.getTotalCapability();
  assertEquals("Memory resource is not match.", newCapacity.getMemory(), 2048);
  assertEquals("CPU resource is not match.", newCapacity.getVirtualCores(), 2);
  assertEquals("GPU resource is not match.", newCapacity.getGpuCores(), 2);

  Assert.assertEquals(NodeState.NEW, node.getState());
}
Example #5
Source File: AbstractYarnClusterDescriptor.java From flink with Apache License 2.0

private ClusterResourceDescription getCurrentFreeClusterResources(YarnClient yarnClient)
    throws YarnException, IOException {
  List<NodeReport> nodes = yarnClient.getNodeReports(NodeState.RUNNING);

  int totalFreeMemory = 0;
  int containerLimit = 0;
  int[] nodeManagersFree = new int[nodes.size()];

  for (int i = 0; i < nodes.size(); i++) {
    NodeReport rep = nodes.get(i);
    int free = rep.getCapability().getMemory()
        - (rep.getUsed() != null ? rep.getUsed().getMemory() : 0);
    nodeManagersFree[i] = free;
    totalFreeMemory += free;
    if (free > containerLimit) {
      containerLimit = free;
    }
  }
  return new ClusterResourceDescription(totalFreeMemory, containerLimit, nodeManagersFree);
}
Example #6
Source File: ClusterProfilingHelper.java From jumbune with GNU Lesser General Public License v3.0

/**
 * Get the total memory available in cluster.
 *
 * @param rmCommunicator
 * @return the total memory available in cluster
 * @throws YarnException the yarn exception
 * @throws IOException Signals that an I/O exception has occurred.
 */
private int getTotalMemoryAvailableInCluster(RMCommunicator rmCommunicator)
    throws YarnException, IOException {
  List<NodeReport> reports = rmCommunicator.getNodeReports();
  int availableMemory = 0;
  Set<String> hostname = new HashSet<String>();
  for (NodeReport nodeReport : reports) {
    if (!hostname.contains(nodeReport.getHttpAddress())
        && nodeReport.getNodeState().equals(NodeState.RUNNING)) {
      hostname.add(nodeReport.getHttpAddress());
      availableMemory += nodeReport.getCapability().getMemory()
          - (nodeReport.getUsed() == null ? 0 : nodeReport.getUsed().getMemory());
    }
  }
  return availableMemory;
}
Example #7
Source File: TestRMNodeTransitions.java From hadoop with Apache License 2.0

@Test
public void testReconnect() {
  RMNodeImpl node = getRunningNode();
  ClusterMetrics cm = ClusterMetrics.getMetrics();
  int initialActive = cm.getNumActiveNMs();
  int initialLost = cm.getNumLostNMs();
  int initialUnhealthy = cm.getUnhealthyNMs();
  int initialDecommissioned = cm.getNumDecommisionedNMs();
  int initialRebooted = cm.getNumRebootedNMs();
  node.handle(new RMNodeReconnectEvent(node.getNodeID(), node, null, null));
  Assert.assertEquals("Active Nodes", initialActive, cm.getNumActiveNMs());
  Assert.assertEquals("Lost Nodes", initialLost, cm.getNumLostNMs());
  Assert.assertEquals("Unhealthy Nodes", initialUnhealthy, cm.getUnhealthyNMs());
  Assert.assertEquals("Decommissioned Nodes", initialDecommissioned,
      cm.getNumDecommisionedNMs());
  Assert.assertEquals("Rebooted Nodes", initialRebooted, cm.getNumRebootedNMs());
  Assert.assertEquals(NodeState.RUNNING, node.getState());
  Assert.assertNotNull(nodesListManagerEvent);
  Assert.assertEquals(NodesListManagerEventType.NODE_USABLE,
      nodesListManagerEvent.getType());
}
Example #8
Source File: TestYarnCLI.java From hadoop with Apache License 2.0

private List<NodeReport> getNodeReports(int noOfNodes, NodeState state,
    boolean emptyNodeLabel) {
  List<NodeReport> nodeReports = new ArrayList<NodeReport>();

  for (int i = 0; i < noOfNodes; i++) {
    Set<String> nodeLabels = null;
    if (!emptyNodeLabel) {
      // node labels is not ordered, but when we output it, it should be
      // ordered
      nodeLabels = ImmutableSet.of("c", "b", "a", "x", "z", "y");
    }
    NodeReport nodeReport = NodeReport.newInstance(NodeId
        .newInstance("host" + i, 0), state, "host" + 1 + ":8888", "rack1",
        Records.newRecord(Resource.class), Records.newRecord(Resource.class),
        0, "", 0, nodeLabels);
    nodeReports.add(nodeReport);
  }
  return nodeReports;
}
Example #9
Source File: AbstractYarnClusterDescriptor.java From Flink-CEPplus with Apache License 2.0

private ClusterResourceDescription getCurrentFreeClusterResources(YarnClient yarnClient)
    throws YarnException, IOException {
  List<NodeReport> nodes = yarnClient.getNodeReports(NodeState.RUNNING);

  int totalFreeMemory = 0;
  int containerLimit = 0;
  int[] nodeManagersFree = new int[nodes.size()];

  for (int i = 0; i < nodes.size(); i++) {
    NodeReport rep = nodes.get(i);
    int free = rep.getCapability().getMemory()
        - (rep.getUsed() != null ? rep.getUsed().getMemory() : 0);
    nodeManagersFree[i] = free;
    totalFreeMemory += free;
    if (free > containerLimit) {
      containerLimit = free;
    }
  }
  return new ClusterResourceDescription(totalFreeMemory, containerLimit, nodeManagersFree);
}
Example #10
Source File: TestResourceTrackerService.java From big-c with Apache License 2.0

private void checkUnealthyNMCount(MockRM rm, MockNM nm1, boolean health,
    int count) throws Exception {
  int waitCount = 0;
  while ((rm.getRMContext().getRMNodes().get(nm1.getNodeId())
      .getState() != NodeState.UNHEALTHY) == health
      && waitCount++ < 20) {
    synchronized (this) {
      wait(100);
    }
  }
  Assert.assertFalse((rm.getRMContext().getRMNodes().get(nm1.getNodeId())
      .getState() != NodeState.UNHEALTHY) == health);
  Assert.assertEquals("Unhealthy metrics not incremented", count,
      ClusterMetrics.getMetrics().getUnhealthyNMs());
}
Example #11
Source File: TestRMNodeTransitions.java From hadoop with Apache License 2.0

@Test
public void testRunningRebooting() {
  RMNodeImpl node = getRunningNode();
  ClusterMetrics cm = ClusterMetrics.getMetrics();
  int initialActive = cm.getNumActiveNMs();
  int initialLost = cm.getNumLostNMs();
  int initialUnhealthy = cm.getUnhealthyNMs();
  int initialDecommissioned = cm.getNumDecommisionedNMs();
  int initialRebooted = cm.getNumRebootedNMs();
  node.handle(new RMNodeEvent(node.getNodeID(), RMNodeEventType.REBOOTING));
  Assert.assertEquals("Active Nodes", initialActive - 1, cm.getNumActiveNMs());
  Assert.assertEquals("Lost Nodes", initialLost, cm.getNumLostNMs());
  Assert.assertEquals("Unhealthy Nodes", initialUnhealthy, cm.getUnhealthyNMs());
  Assert.assertEquals("Decommissioned Nodes", initialDecommissioned,
      cm.getNumDecommisionedNMs());
  Assert.assertEquals("Rebooted Nodes", initialRebooted + 1,
      cm.getNumRebootedNMs());
  Assert.assertEquals(NodeState.REBOOTED, node.getState());
}
Example #12
Source File: TestRMNodeTransitions.java From hadoop with Apache License 2.0

@Test
public void testRunningExpire() {
  // A RUNNING node whose heartbeat expires is counted as LOST.
  RMNodeImpl node = getRunningNode();
  ClusterMetrics cm = ClusterMetrics.getMetrics();
  int initialActive = cm.getNumActiveNMs();
  int initialLost = cm.getNumLostNMs();
  int initialUnhealthy = cm.getUnhealthyNMs();
  int initialDecommissioned = cm.getNumDecommisionedNMs();
  int initialRebooted = cm.getNumRebootedNMs();
  node.handle(new RMNodeEvent(node.getNodeID(), RMNodeEventType.EXPIRE));
  Assert.assertEquals("Active Nodes", initialActive - 1, cm.getNumActiveNMs());
  Assert.assertEquals("Lost Nodes", initialLost + 1, cm.getNumLostNMs());
  Assert.assertEquals("Unhealthy Nodes", initialUnhealthy, cm.getUnhealthyNMs());
  Assert.assertEquals("Decommissioned Nodes", initialDecommissioned,
      cm.getNumDecommisionedNMs());
  Assert.assertEquals("Rebooted Nodes", initialRebooted, cm.getNumRebootedNMs());
  Assert.assertEquals(NodeState.LOST, node.getState());
}
Example #13
Source File: ClusterProfilingHelper.java From jumbune with GNU Lesser General Public License v3.0

/**
 * Get the available v cores in cluster.
 *
 * @param rmCommunicator
 * @return the available v cores in cluster
 * @throws IOException Signals that an I/O exception has occurred.
 */
private int getAvailableVCoresInCluster(RMCommunicator rmCommunicator) throws IOException {
  List<NodeReport> nodeReports = null;
  try {
    nodeReports = rmCommunicator.getNodeReports();
  } catch (YarnException e) {
    LOGGER.error(JumbuneRuntimeException.throwYarnException(e.getStackTrace()));
  }
  Set<String> hostname = new HashSet<String>();
  int totalVCores = 0;
  int usedVCores = 0;
  for (NodeReport report : nodeReports) {
    if (!hostname.contains(report.getHttpAddress())
        && report.getNodeState().equals(NodeState.RUNNING)) {
      hostname.add(report.getHttpAddress());
      totalVCores += report.getCapability().getVirtualCores();
      if (report.getUsed() != null) {
        usedVCores += report.getUsed().getVirtualCores();
      }
    }
  }
  int availableVCores = totalVCores - usedVCores;
  return availableVCores;
}
Example #14
Source File: TestRMNodeTransitions.java From big-c with Apache License 2.0

@Test
public void testRunningDecommission() {
  RMNodeImpl node = getRunningNode();
  ClusterMetrics cm = ClusterMetrics.getMetrics();
  int initialActive = cm.getNumActiveNMs();
  int initialLost = cm.getNumLostNMs();
  int initialUnhealthy = cm.getUnhealthyNMs();
  int initialDecommissioned = cm.getNumDecommisionedNMs();
  int initialRebooted = cm.getNumRebootedNMs();
  node.handle(new RMNodeEvent(node.getNodeID(), RMNodeEventType.DECOMMISSION));
  Assert.assertEquals("Active Nodes", initialActive - 1, cm.getNumActiveNMs());
  Assert.assertEquals("Lost Nodes", initialLost, cm.getNumLostNMs());
  Assert.assertEquals("Unhealthy Nodes", initialUnhealthy, cm.getUnhealthyNMs());
  Assert.assertEquals("Decommissioned Nodes", initialDecommissioned + 1,
      cm.getNumDecommisionedNMs());
  Assert.assertEquals("Rebooted Nodes", initialRebooted, cm.getNumRebootedNMs());
  Assert.assertEquals(NodeState.DECOMMISSIONED, node.getState());
}
Example #15
Source File: MockNodes.java From hadoop with Apache License 2.0

public MockRMNodeImpl(NodeId nodeId, String nodeAddr, String httpAddress,
    Resource perNode, String rackName, String healthReport,
    long lastHealthReportTime, int cmdPort, String hostName, NodeState state,
    Set<String> labels) {
  this.nodeId = nodeId;
  this.nodeAddr = nodeAddr;
  this.httpAddress = httpAddress;
  this.perNode = perNode;
  this.rackName = rackName;
  this.healthReport = healthReport;
  this.lastHealthReportTime = lastHealthReportTime;
  this.cmdPort = cmdPort;
  this.hostName = hostName;
  this.state = state;
  this.labels = labels;
}
Example #16
Source File: RMNodeImpl.java From hadoop with Apache License 2.0

private void updateMetricsForRejoinedNode(NodeState previousNodeState) {
  // A node rejoining the cluster becomes active again, so decrement the
  // counter that tracked its previous state.
  ClusterMetrics metrics = ClusterMetrics.getMetrics();
  metrics.incrNumActiveNodes();

  switch (previousNodeState) {
  case LOST:
    metrics.decrNumLostNMs();
    break;
  case REBOOTED:
    metrics.decrNumRebootedNMs();
    break;
  case DECOMMISSIONED:
    metrics.decrDecommisionedNMs();
    break;
  case UNHEALTHY:
    metrics.decrNumUnhealthyNMs();
    break;
  default:
    LOG.debug("Unexpected previous node state");
  }
}
Example #17
Source File: JobImpl.java From hadoop with Apache License 2.0

private void actOnUnusableNode(NodeId nodeId, NodeState nodeState) {
  // rerun previously successful map tasks
  List<TaskAttemptId> taskAttemptIdList =
      nodesToSucceededTaskAttempts.get(nodeId);
  if (taskAttemptIdList != null) {
    String mesg = "TaskAttempt killed because it ran on unusable node "
        + nodeId;
    for (TaskAttemptId id : taskAttemptIdList) {
      if (TaskType.MAP == id.getTaskId().getTaskType()) {
        // reschedule only map tasks because their outputs maybe unusable
        LOG.info(mesg + ". AttemptId:" + id);
        eventHandler.handle(new TaskAttemptKillEvent(id, mesg));
      }
    }
  }
  // currently running task attempts on unusable nodes are handled in
  // RMContainerAllocator
}
Example #18
Source File: MockNodes.java From big-c with Apache License 2.0

public static List<RMNode> newNodes(int racks, int nodesPerRack,
    Resource perNode) {
  List<RMNode> list = Lists.newArrayList();
  for (int i = 0; i < racks; ++i) {
    for (int j = 0; j < nodesPerRack; ++j) {
      if (j == (nodesPerRack - 1)) {
        // One unhealthy node per rack.
        list.add(nodeInfo(i, perNode, NodeState.UNHEALTHY));
      }
      if (j == 0) {
        // One node with label
        list.add(nodeInfo(i, perNode, NodeState.RUNNING, ImmutableSet.of("x")));
      } else {
        list.add(newNodeInfo(i, perNode));
      }
    }
  }
  return list;
}
Example #19
Source File: TestRMNodeTransitions.java From hadoop with Apache License 2.0

@Test
public void testUnhealthyExpire() {
  // An UNHEALTHY node whose heartbeat expires is also counted as LOST.
  RMNodeImpl node = getUnhealthyNode();
  ClusterMetrics cm = ClusterMetrics.getMetrics();
  int initialActive = cm.getNumActiveNMs();
  int initialLost = cm.getNumLostNMs();
  int initialUnhealthy = cm.getUnhealthyNMs();
  int initialDecommissioned = cm.getNumDecommisionedNMs();
  int initialRebooted = cm.getNumRebootedNMs();
  node.handle(new RMNodeEvent(node.getNodeID(), RMNodeEventType.EXPIRE));
  Assert.assertEquals("Active Nodes", initialActive, cm.getNumActiveNMs());
  Assert.assertEquals("Lost Nodes", initialLost + 1, cm.getNumLostNMs());
  Assert.assertEquals("Unhealthy Nodes", initialUnhealthy - 1,
      cm.getUnhealthyNMs());
  Assert.assertEquals("Decommissioned Nodes", initialDecommissioned,
      cm.getNumDecommisionedNMs());
  Assert.assertEquals("Rebooted Nodes", initialRebooted, cm.getNumRebootedNMs());
  Assert.assertEquals(NodeState.LOST, node.getState());
}
Example #20
Source File: TestAMNodeTracker.java From tez with Apache License 2.0

@Test(timeout = 5000)
public void testHealthUpdateUnknownNode() {
  AppContext appContext = mock(AppContext.class);

  AMNodeTracker amNodeTracker = new AMNodeTracker(eventHandler, appContext);
  doReturn(amNodeTracker).when(appContext).getNodeTracker();
  amNodeTracker.init(new Configuration(false));
  amNodeTracker.start();

  NodeId nodeId = NodeId.newInstance("unknownhost", 2342);
  NodeReport nodeReport = generateNodeReport(nodeId, NodeState.UNHEALTHY);
  amNodeTracker.handle(new AMNodeEventStateChanged(nodeReport, 0));
  dispatcher.await();

  amNodeTracker.stop();
  // No exceptions - the status update was ignored. Not bothering to capture
  // the log message for verification.
}
Example #21
Source File: JobImpl.java From big-c with Apache License 2.0

private void actOnUnusableNode(NodeId nodeId, NodeState nodeState) {
  // rerun previously successful map tasks
  List<TaskAttemptId> taskAttemptIdList =
      nodesToSucceededTaskAttempts.get(nodeId);
  if (taskAttemptIdList != null) {
    String mesg = "TaskAttempt killed because it ran on unusable node "
        + nodeId;
    for (TaskAttemptId id : taskAttemptIdList) {
      if (TaskType.MAP == id.getTaskId().getTaskType()) {
        // reschedule only map tasks because their outputs maybe unusable
        LOG.info(mesg + ". AttemptId:" + id);
        eventHandler.handle(new TaskAttemptKillEvent(id, mesg));
      }
    }
  }
  // currently running task attempts on unusable nodes are handled in
  // RMContainerAllocator
}
Example #22
Source File: MockNodes.java From hadoop with Apache License 2.0

public static List<RMNode> newNodes(int racks, int nodesPerRack,
    Resource perNode) {
  List<RMNode> list = Lists.newArrayList();
  for (int i = 0; i < racks; ++i) {
    for (int j = 0; j < nodesPerRack; ++j) {
      if (j == (nodesPerRack - 1)) {
        // One unhealthy node per rack.
        list.add(nodeInfo(i, perNode, NodeState.UNHEALTHY));
      }
      if (j == 0) {
        // One node with label
        list.add(nodeInfo(i, perNode, NodeState.RUNNING, ImmutableSet.of("x")));
      } else {
        list.add(newNodeInfo(i, perNode));
      }
    }
  }
  return list;
}
Example #23
Source File: TestRMWebServicesNodes.java From hadoop with Apache License 2.0

@Test
public void testQueryAll() throws Exception {
  WebResource r = resource();
  MockNM nm1 = rm.registerNode("h1:1234", 5120);
  MockNM nm2 = rm.registerNode("h2:1235", 5121);
  MockNM nm3 = rm.registerNode("h3:1236", 5122);
  rm.sendNodeStarted(nm1);
  rm.sendNodeStarted(nm3);
  rm.NMwaitForState(nm1.getNodeId(), NodeState.RUNNING);
  rm.NMwaitForState(nm2.getNodeId(), NodeState.NEW);
  rm.sendNodeLost(nm3);

  ClientResponse response = r.path("ws").path("v1").path("cluster")
      .path("nodes")
      .queryParam("states", Joiner.on(',').join(EnumSet.allOf(NodeState.class)))
      .accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
  assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
  JSONObject json = response.getEntity(JSONObject.class);
  JSONObject nodes = json.getJSONObject("nodes");
  assertEquals("incorrect number of elements", 1, nodes.length());
  JSONArray nodeArray = nodes.getJSONArray("node");
  assertEquals("incorrect number of elements", 3, nodeArray.length());
}
Example #24
Source File: TestRMWebServicesNodes.java From hadoop with Apache License 2.0

@Test
public void testNodesQueryNew() throws JSONException, Exception {
  WebResource r = resource();
  MockNM nm1 = rm.registerNode("h1:1234", 5120);
  MockNM nm2 = rm.registerNode("h2:1235", 5121);
  rm.sendNodeStarted(nm1);
  rm.NMwaitForState(nm1.getNodeId(), NodeState.RUNNING);
  rm.NMwaitForState(nm2.getNodeId(), NodeState.NEW);

  ClientResponse response = r.path("ws").path("v1").path("cluster")
      .path("nodes").queryParam("states", NodeState.NEW.toString())
      .accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
  assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
  JSONObject json = response.getEntity(JSONObject.class);
  assertEquals("incorrect number of elements", 1, json.length());
  JSONObject nodes = json.getJSONObject("nodes");
  assertEquals("incorrect number of elements", 1, nodes.length());
  JSONArray nodeArray = nodes.getJSONArray("node");
  assertEquals("incorrect number of elements", 1, nodeArray.length());
  JSONObject info = nodeArray.getJSONObject(0);
  verifyNodeInfo(info, nm2);
}
Example #25
Source File: RMNodeImpl.java From big-c with Apache License 2.0

public void handle(RMNodeEvent event) {
  LOG.debug("Processing " + event.getNodeId() + " of type " + event.getType());
  try {
    writeLock.lock();
    NodeState oldState = getState();
    try {
      stateMachine.doTransition(event.getType(), event);
    } catch (InvalidStateTransitonException e) {
      LOG.error("Can't handle this event at current state", e);
      LOG.error("Invalid event " + event.getType() + " on Node " + this.nodeId);
    }
    if (oldState != getState()) {
      LOG.info(nodeId + " Node Transitioned from " + oldState + " to "
          + getState());
    }
  } finally {
    writeLock.unlock();
  }
}
Example #26
Source File: TestRMWebApp.java From big-c with Apache License 2.0

@Test
public void testNodesPage() {
  // 10 nodes. Two of each type.
  final RMContext rmContext = mockRMContext(3, 2, 12, 8 * GiB);
  Injector injector = WebAppTests.createMockInjector(RMContext.class,
      rmContext,
      new Module() {
        @Override
        public void configure(Binder binder) {
          try {
            binder.bind(ResourceManager.class).toInstance(mockRm(rmContext));
          } catch (IOException e) {
            throw new IllegalStateException(e);
          }
        }
      });

  // All nodes
  NodesPage instance = injector.getInstance(NodesPage.class);
  instance.render();
  WebAppTests.flushOutput(injector);

  // Unhealthy nodes
  instance.moreParams().put(YarnWebParams.NODE_STATE,
      NodeState.UNHEALTHY.toString());
  instance.render();
  WebAppTests.flushOutput(injector);

  // Lost nodes
  instance.moreParams().put(YarnWebParams.NODE_STATE,
      NodeState.LOST.toString());
  instance.render();
  WebAppTests.flushOutput(injector);
}
Example #27
Source File: ProtocolHATestBase.java From big-c with Apache License 2.0

public List<NodeReport> createFakeNodeReports() {
  NodeId nodeId = NodeId.newInstance("localhost", 0);
  NodeReport report = NodeReport.newInstance(nodeId, NodeState.RUNNING,
      "localhost", "rack1", null, null, 4, null, 1000L, null);
  List<NodeReport> reports = new ArrayList<NodeReport>();
  reports.add(report);
  return reports;
}
Example #28
Source File: GetClusterNodesRequestPBImpl.java From big-c with Apache License 2.0

private void initNodeStates() {
  if (this.states != null) {
    return;
  }
  GetClusterNodesRequestProtoOrBuilder p = viaProto ? proto : builder;
  List<NodeStateProto> list = p.getNodeStatesList();
  this.states = EnumSet.noneOf(NodeState.class);

  for (NodeStateProto c : list) {
    this.states.add(ProtoUtils.convertFromProtoFormat(c));
  }
}
Example #29
Source File: TestRMWebServicesNodes.java From hadoop with Apache License 2.0

public void testNodesHelper(String path, String media)
    throws JSONException, Exception {
  WebResource r = resource();
  MockNM nm1 = rm.registerNode("h1:1234", 5120);
  MockNM nm2 = rm.registerNode("h2:1235", 5121);
  rm.sendNodeStarted(nm1);
  rm.sendNodeStarted(nm2);
  rm.NMwaitForState(nm1.getNodeId(), NodeState.RUNNING);
  rm.NMwaitForState(nm2.getNodeId(), NodeState.RUNNING);

  ClientResponse response = r.path("ws").path("v1").path("cluster")
      .path(path).accept(media).get(ClientResponse.class);
  assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
  JSONObject json = response.getEntity(JSONObject.class);
  assertEquals("incorrect number of elements", 1, json.length());
  JSONObject nodes = json.getJSONObject("nodes");
  assertEquals("incorrect number of elements", 1, nodes.length());
  JSONArray nodeArray = nodes.getJSONArray("node");
  assertEquals("incorrect number of elements", 2, nodeArray.length());
  JSONObject info = nodeArray.getJSONObject(0);
  String id = info.get("id").toString();

  if (id.matches("h1:1234")) {
    verifyNodeInfo(info, nm1);
    verifyNodeInfo(nodeArray.getJSONObject(1), nm2);
  } else {
    verifyNodeInfo(info, nm2);
    verifyNodeInfo(nodeArray.getJSONObject(1), nm1);
  }
}
Example #30
Source File: GetClusterNodesRequestPBImpl.java From big-c with Apache License 2.0

private void mergeLocalToBuilder() {
  if (this.states != null) {
    maybeInitBuilder();
    builder.clearNodeStates();
    Iterable<NodeStateProto> iterable = new Iterable<NodeStateProto>() {
      @Override
      public Iterator<NodeStateProto> iterator() {
        return new Iterator<NodeStateProto>() {

          Iterator<NodeState> iter = states.iterator();

          @Override
          public boolean hasNext() {
            return iter.hasNext();
          }

          @Override
          public NodeStateProto next() {
            return ProtoUtils.convertToProtoFormat(iter.next());
          }

          @Override
          public void remove() {
            throw new UnsupportedOperationException();
          }
        };
      }
    };
    builder.addAllNodeStates(iterable);
  }
}