org.apache.hadoop.yarn.api.records.QueueState Java Examples
The following examples show how to use
org.apache.hadoop.yarn.api.records.QueueState.
Each example lists its source file and originating project.
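Before the individual examples, here is a minimal, self-contained sketch of where QueueState typically appears in client code: listing the queues known to the ResourceManager through YarnClient and checking whether each one is still accepting submissions. This sketch is an illustrative assumption, not taken from any of the source files below; it only relies on the standard YarnClient and QueueInfo calls.

import java.util.List;

import org.apache.hadoop.yarn.api.records.QueueInfo;
import org.apache.hadoop.yarn.api.records.QueueState;
import org.apache.hadoop.yarn.client.api.YarnClient;
import org.apache.hadoop.yarn.conf.YarnConfiguration;

public class QueueStateProbe {
  public static void main(String[] args) throws Exception {
    YarnClient client = YarnClient.createYarnClient();
    client.init(new YarnConfiguration());
    client.start();
    try {
      // getAllQueues() returns the queues configured on the ResourceManager.
      List<QueueInfo> queues = client.getAllQueues();
      for (QueueInfo queue : queues) {
        QueueState state = queue.getQueueState();
        // A queue accepts new applications only while it is RUNNING.
        System.out.println(queue.getQueueName() + " -> " + state
            + (state == QueueState.RUNNING ? " (accepting submissions)" : ""));
      }
    } finally {
      client.stop();
    }
  }
}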
Example #1
Source File: TestTypeConverter.java From big-c with Apache License 2.0 | 6 votes |
/**
 * Test that child queues are converted too during conversion of the parent
 * queue
 */
@Test
public void testFromYarnQueue() {
  //Define child queue
  org.apache.hadoop.yarn.api.records.QueueInfo child =
      Mockito.mock(org.apache.hadoop.yarn.api.records.QueueInfo.class);
  Mockito.when(child.getQueueState()).thenReturn(QueueState.RUNNING);

  //Define parent queue
  org.apache.hadoop.yarn.api.records.QueueInfo queueInfo =
      Mockito.mock(org.apache.hadoop.yarn.api.records.QueueInfo.class);
  List<org.apache.hadoop.yarn.api.records.QueueInfo> children =
      new ArrayList<org.apache.hadoop.yarn.api.records.QueueInfo>();
  children.add(child); //Add one child
  Mockito.when(queueInfo.getChildQueues()).thenReturn(children);
  Mockito.when(queueInfo.getQueueState()).thenReturn(QueueState.RUNNING);

  //Call the function we're testing
  org.apache.hadoop.mapreduce.QueueInfo returned =
      TypeConverter.fromYarn(queueInfo, new Configuration());

  //Verify that the converted queue has the 1 child we had added
  Assert.assertEquals("QueueInfo children weren't properly converted",
      returned.getQueueChildren().size(), 1);
}
Example #2
Source File: TestTypeConverter.java From hadoop with Apache License 2.0 | 6 votes |
@Test
public void testEnums() throws Exception {
  for (YarnApplicationState applicationState : YarnApplicationState.values()) {
    TypeConverter.fromYarn(applicationState, FinalApplicationStatus.FAILED);
  }
  // ad hoc test of NEW_SAVING, which is newly added
  Assert.assertEquals(State.PREP, TypeConverter.fromYarn(
      YarnApplicationState.NEW_SAVING, FinalApplicationStatus.FAILED));

  for (TaskType taskType : TaskType.values()) {
    TypeConverter.fromYarn(taskType);
  }

  for (JobState jobState : JobState.values()) {
    TypeConverter.fromYarn(jobState);
  }

  for (QueueState queueState : QueueState.values()) {
    TypeConverter.fromYarn(queueState);
  }

  for (TaskState taskState : TaskState.values()) {
    TypeConverter.fromYarn(taskState);
  }
}
Example #3
Source File: TestYarnCLI.java From hadoop with Apache License 2.0 | 6 votes |
@Test
public void testGetQueueInfoWithEmptyNodeLabel() throws Exception {
  QueueCLI cli = createAndGetQueueCLI();
  QueueInfo queueInfo = QueueInfo.newInstance("queueA", 0.4f, 0.8f, 0.5f,
      null, null, QueueState.RUNNING, null, null);
  when(client.getQueueInfo(any(String.class))).thenReturn(queueInfo);
  int result = cli.run(new String[] { "-status", "queueA" });
  assertEquals(0, result);
  verify(client).getQueueInfo("queueA");
  ByteArrayOutputStream baos = new ByteArrayOutputStream();
  PrintWriter pw = new PrintWriter(baos);
  pw.println("Queue Information : ");
  pw.println("Queue Name : " + "queueA");
  pw.println("\tState : " + "RUNNING");
  pw.println("\tCapacity : " + "40.0%");
  pw.println("\tCurrent Capacity : " + "50.0%");
  pw.println("\tMaximum Capacity : " + "80.0%");
  pw.println("\tDefault Node Label expression : ");
  pw.println("\tAccessible Node Labels : ");
  pw.close();
  String queueInfoStr = baos.toString("UTF-8");
  Assert.assertEquals(queueInfoStr, sysOutStream.toString());
}
Example #4
Source File: TestYarnCLI.java From hadoop with Apache License 2.0 | 6 votes |
@Test
public void testGetQueueInfo() throws Exception {
  QueueCLI cli = createAndGetQueueCLI();
  Set<String> nodeLabels = new HashSet<String>();
  nodeLabels.add("GPU");
  nodeLabels.add("JDK_7");
  QueueInfo queueInfo = QueueInfo.newInstance("queueA", 0.4f, 0.8f, 0.5f,
      null, null, QueueState.RUNNING, nodeLabels, "GPU");
  when(client.getQueueInfo(any(String.class))).thenReturn(queueInfo);
  int result = cli.run(new String[] { "-status", "queueA" });
  assertEquals(0, result);
  verify(client).getQueueInfo("queueA");
  ByteArrayOutputStream baos = new ByteArrayOutputStream();
  PrintWriter pw = new PrintWriter(baos);
  pw.println("Queue Information : ");
  pw.println("Queue Name : " + "queueA");
  pw.println("\tState : " + "RUNNING");
  pw.println("\tCapacity : " + "40.0%");
  pw.println("\tCurrent Capacity : " + "50.0%");
  pw.println("\tMaximum Capacity : " + "80.0%");
  pw.println("\tDefault Node Label expression : " + "GPU");
  pw.println("\tAccessible Node Labels : " + "JDK_7,GPU");
  pw.close();
  String queueInfoStr = baos.toString("UTF-8");
  Assert.assertEquals(queueInfoStr, sysOutStream.toString());
}
Example #5
Source File: TestTypeConverter.java From hadoop with Apache License 2.0 | 6 votes |
/**
 * Test that child queues are converted too during conversion of the parent
 * queue
 */
@Test
public void testFromYarnQueue() {
  //Define child queue
  org.apache.hadoop.yarn.api.records.QueueInfo child =
      Mockito.mock(org.apache.hadoop.yarn.api.records.QueueInfo.class);
  Mockito.when(child.getQueueState()).thenReturn(QueueState.RUNNING);

  //Define parent queue
  org.apache.hadoop.yarn.api.records.QueueInfo queueInfo =
      Mockito.mock(org.apache.hadoop.yarn.api.records.QueueInfo.class);
  List<org.apache.hadoop.yarn.api.records.QueueInfo> children =
      new ArrayList<org.apache.hadoop.yarn.api.records.QueueInfo>();
  children.add(child); //Add one child
  Mockito.when(queueInfo.getChildQueues()).thenReturn(children);
  Mockito.when(queueInfo.getQueueState()).thenReturn(QueueState.RUNNING);

  //Call the function we're testing
  org.apache.hadoop.mapreduce.QueueInfo returned =
      TypeConverter.fromYarn(queueInfo, new Configuration());

  //Verify that the converted queue has the 1 child we had added
  Assert.assertEquals("QueueInfo children weren't properly converted",
      returned.getQueueChildren().size(), 1);
}
Example #6
Source File: TestRMWebServices.java From hadoop with Apache License 2.0 | 6 votes |
public void verifyClusterSchedulerFifoGeneric(String type, String state,
    float capacity, float usedCapacity, int minQueueCapacity,
    int maxQueueCapacity, int numNodes, int usedNodeCapacity,
    int availNodeCapacity, int totalNodeCapacity, int numContainers)
    throws JSONException, Exception {

  assertEquals("type doesn't match", "fifoScheduler", type);
  assertEquals("qstate doesn't match", QueueState.RUNNING.toString(), state);
  assertEquals("capacity doesn't match", 1.0, capacity, 0.0);
  assertEquals("usedCapacity doesn't match", 0.0, usedCapacity, 0.0);
  assertEquals(
      "minQueueMemoryCapacity doesn't match",
      YarnConfiguration.DEFAULT_RM_SCHEDULER_MINIMUM_ALLOCATION_MB,
      minQueueCapacity);
  assertEquals("maxQueueMemoryCapacity doesn't match",
      YarnConfiguration.DEFAULT_RM_SCHEDULER_MAXIMUM_ALLOCATION_MB,
      maxQueueCapacity);
  assertEquals("numNodes doesn't match", 0, numNodes);
  assertEquals("usedNodeCapacity doesn't match", 0, usedNodeCapacity);
  assertEquals("availNodeCapacity doesn't match", 0, availNodeCapacity);
  assertEquals("totalNodeCapacity doesn't match", 0, totalNodeCapacity);
  assertEquals("numContainers doesn't match", 0, numContainers);
}
Example #7
Source File: FifoScheduler.java From big-c with Apache License 2.0 | 6 votes |
@Override
public QueueInfo getQueueInfo(
    boolean includeChildQueues, boolean recursive) {
  QueueInfo queueInfo = recordFactory.newRecordInstance(QueueInfo.class);
  queueInfo.setQueueName(DEFAULT_QUEUE.getQueueName());
  queueInfo.setCapacity(1.0f);
  if (clusterResource.getMemory() == 0) {
    queueInfo.setCurrentCapacity(0.0f);
  } else {
    queueInfo.setCurrentCapacity((float) usedResource.getMemory()
        / clusterResource.getMemory());
  }
  queueInfo.setMaximumCapacity(1.0f);
  queueInfo.setChildQueues(new ArrayList<QueueInfo>());
  queueInfo.setQueueState(QueueState.RUNNING);
  return queueInfo;
}
Example #8
Source File: TestRMWebServices.java From big-c with Apache License 2.0 | 6 votes |
public void verifyClusterSchedulerFifoGeneric(String type, String state,
    float capacity, float usedCapacity, int minQueueCapacity,
    int maxQueueCapacity, int numNodes, int usedNodeCapacity,
    int availNodeCapacity, int totalNodeCapacity, int numContainers)
    throws JSONException, Exception {

  assertEquals("type doesn't match", "fifoScheduler", type);
  assertEquals("qstate doesn't match", QueueState.RUNNING.toString(), state);
  assertEquals("capacity doesn't match", 1.0, capacity, 0.0);
  assertEquals("usedCapacity doesn't match", 0.0, usedCapacity, 0.0);
  assertEquals(
      "minQueueMemoryCapacity doesn't match",
      YarnConfiguration.DEFAULT_RM_SCHEDULER_MINIMUM_ALLOCATION_MB,
      minQueueCapacity);
  assertEquals("maxQueueMemoryCapacity doesn't match",
      YarnConfiguration.DEFAULT_RM_SCHEDULER_MAXIMUM_ALLOCATION_MB,
      maxQueueCapacity);
  assertEquals("numNodes doesn't match", 0, numNodes);
  assertEquals("usedNodeCapacity doesn't match", 0, usedNodeCapacity);
  assertEquals("availNodeCapacity doesn't match", 0, availNodeCapacity);
  assertEquals("totalNodeCapacity doesn't match", 0, totalNodeCapacity);
  assertEquals("numContainers doesn't match", 0, numContainers);
}
Example #9
Source File: TestYarnCLI.java From big-c with Apache License 2.0 | 6 votes |
@Test
public void testGetQueueInfo() throws Exception {
  QueueCLI cli = createAndGetQueueCLI();
  Set<String> nodeLabels = new HashSet<String>();
  nodeLabels.add("GPU");
  nodeLabels.add("JDK_7");
  QueueInfo queueInfo = QueueInfo.newInstance("queueA", 0.4f, 0.8f, 0.5f,
      null, null, QueueState.RUNNING, nodeLabels, "GPU");
  when(client.getQueueInfo(any(String.class))).thenReturn(queueInfo);
  int result = cli.run(new String[] { "-status", "queueA" });
  assertEquals(0, result);
  verify(client).getQueueInfo("queueA");
  ByteArrayOutputStream baos = new ByteArrayOutputStream();
  PrintWriter pw = new PrintWriter(baos);
  pw.println("Queue Information : ");
  pw.println("Queue Name : " + "queueA");
  pw.println("\tState : " + "RUNNING");
  pw.println("\tCapacity : " + "40.0%");
  pw.println("\tCurrent Capacity : " + "50.0%");
  pw.println("\tMaximum Capacity : " + "80.0%");
  pw.println("\tDefault Node Label expression : " + "GPU");
  pw.println("\tAccessible Node Labels : " + "JDK_7,GPU");
  pw.close();
  String queueInfoStr = baos.toString("UTF-8");
  Assert.assertEquals(queueInfoStr, sysOutStream.toString());
}
Example #10
Source File: FifoScheduler.java From hadoop with Apache License 2.0 | 6 votes |
@Override
public QueueInfo getQueueInfo(
    boolean includeChildQueues, boolean recursive) {
  QueueInfo queueInfo = recordFactory.newRecordInstance(QueueInfo.class);
  queueInfo.setQueueName(DEFAULT_QUEUE.getQueueName());
  queueInfo.setCapacity(1.0f);
  if (clusterResource.getMemory() == 0) {
    queueInfo.setCurrentCapacity(0.0f);
  } else {
    queueInfo.setCurrentCapacity((float) usedResource.getMemory()
        / clusterResource.getMemory());
  }
  queueInfo.setMaximumCapacity(1.0f);
  queueInfo.setChildQueues(new ArrayList<QueueInfo>());
  queueInfo.setQueueState(QueueState.RUNNING);
  return queueInfo;
}
Example #11
Source File: TestYarnCLI.java From big-c with Apache License 2.0 | 6 votes |
@Test
public void testGetQueueInfoWithEmptyNodeLabel() throws Exception {
  QueueCLI cli = createAndGetQueueCLI();
  QueueInfo queueInfo = QueueInfo.newInstance("queueA", 0.4f, 0.8f, 0.5f,
      null, null, QueueState.RUNNING, null, null);
  when(client.getQueueInfo(any(String.class))).thenReturn(queueInfo);
  int result = cli.run(new String[] { "-status", "queueA" });
  assertEquals(0, result);
  verify(client).getQueueInfo("queueA");
  ByteArrayOutputStream baos = new ByteArrayOutputStream();
  PrintWriter pw = new PrintWriter(baos);
  pw.println("Queue Information : ");
  pw.println("Queue Name : " + "queueA");
  pw.println("\tState : " + "RUNNING");
  pw.println("\tCapacity : " + "40.0%");
  pw.println("\tCurrent Capacity : " + "50.0%");
  pw.println("\tMaximum Capacity : " + "80.0%");
  pw.println("\tDefault Node Label expression : ");
  pw.println("\tAccessible Node Labels : ");
  pw.close();
  String queueInfoStr = baos.toString("UTF-8");
  Assert.assertEquals(queueInfoStr, sysOutStream.toString());
}
Example #12
Source File: TestTypeConverter.java From big-c with Apache License 2.0 | 6 votes |
@Test
public void testEnums() throws Exception {
  for (YarnApplicationState applicationState : YarnApplicationState.values()) {
    TypeConverter.fromYarn(applicationState, FinalApplicationStatus.FAILED);
  }
  // ad hoc test of NEW_SAVING, which is newly added
  Assert.assertEquals(State.PREP, TypeConverter.fromYarn(
      YarnApplicationState.NEW_SAVING, FinalApplicationStatus.FAILED));

  for (TaskType taskType : TaskType.values()) {
    TypeConverter.fromYarn(taskType);
  }

  for (JobState jobState : JobState.values()) {
    TypeConverter.fromYarn(jobState);
  }

  for (QueueState queueState : QueueState.values()) {
    TypeConverter.fromYarn(queueState);
  }

  for (TaskState taskState : TaskState.values()) {
    TypeConverter.fromYarn(taskState);
  }
}
Example #13
Source File: ParentQueue.java From big-c with Apache License 2.0 | 5 votes |
@Override
public void submitApplication(ApplicationId applicationId, String user,
    String queue) throws AccessControlException {

  synchronized (this) {
    // Sanity check
    if (queue.equals(queueName)) {
      throw new AccessControlException("Cannot submit application " +
          "to non-leaf queue: " + queueName);
    }

    if (state != QueueState.RUNNING) {
      throw new AccessControlException("Queue " + getQueuePath() +
          " is STOPPED. Cannot accept submission of application: " +
          applicationId);
    }

    addApplication(applicationId, user);
  }

  // Inform the parent queue
  if (parent != null) {
    try {
      parent.submitApplication(applicationId, user, queue);
    } catch (AccessControlException ace) {
      LOG.info("Failed to submit application to parent-queue: " +
          parent.getQueuePath(), ace);
      removeApplication(applicationId, user);
      throw ace;
    }
  }
}
Example #14
Source File: FSQueue.java From big-c with Apache License 2.0 | 5 votes |
@Override
public QueueInfo getQueueInfo(boolean includeChildQueues, boolean recursive) {
  QueueInfo queueInfo = recordFactory.newRecordInstance(QueueInfo.class);
  queueInfo.setQueueName(getQueueName());

  if (scheduler.getClusterResource().getMemory() == 0) {
    queueInfo.setCapacity(0.0f);
  } else {
    queueInfo.setCapacity((float) getFairShare().getMemory() /
        scheduler.getClusterResource().getMemory());
  }

  if (getFairShare().getMemory() == 0) {
    queueInfo.setCurrentCapacity(0.0f);
  } else {
    queueInfo.setCurrentCapacity((float) getResourceUsage().getMemory() /
        getFairShare().getMemory());
  }

  ArrayList<QueueInfo> childQueueInfos = new ArrayList<QueueInfo>();
  if (includeChildQueues) {
    Collection<FSQueue> childQueues = getChildQueues();
    for (FSQueue child : childQueues) {
      childQueueInfos.add(child.getQueueInfo(recursive, recursive));
    }
  }
  queueInfo.setChildQueues(childQueueInfos);
  queueInfo.setQueueState(QueueState.RUNNING);
  return queueInfo;
}
Example #15
Source File: QueueInfoPBImpl.java From big-c with Apache License 2.0 | 5 votes |
@Override
public void setQueueState(QueueState queueState) {
  maybeInitBuilder();
  if (queueState == null) {
    builder.clearState();
    return;
  }
  builder.setState(convertToProtoFormat(queueState));
}
Example #16
Source File: QueueInfoPBImpl.java From big-c with Apache License 2.0 | 5 votes |
@Override
public QueueState getQueueState() {
  QueueInfoProtoOrBuilder p = viaProto ? proto : builder;
  if (!p.hasState()) {
    return null;
  }
  return convertFromProtoFormat(p.getState());
}
Example #17
Source File: TypeConverter.java From big-c with Apache License 2.0 | 5 votes |
public static org.apache.hadoop.mapreduce.QueueState fromYarn(
    QueueState state) {
  org.apache.hadoop.mapreduce.QueueState qState =
      org.apache.hadoop.mapreduce.QueueState.getState(
          StringUtils.toLowerCase(state.toString()));
  return qState;
}
Example #18
Source File: TestTypeConverter.java From big-c with Apache License 2.0 | 5 votes |
@Test
public void testFromYarnQueueInfo() {
  org.apache.hadoop.yarn.api.records.QueueInfo queueInfo = Records
      .newRecord(org.apache.hadoop.yarn.api.records.QueueInfo.class);
  queueInfo.setQueueState(org.apache.hadoop.yarn.api.records.QueueState.STOPPED);
  org.apache.hadoop.mapreduce.QueueInfo returned =
      TypeConverter.fromYarn(queueInfo, new Configuration());
  Assert.assertEquals("queueInfo translation didn't work.",
      returned.getState().toString(),
      StringUtils.toLowerCase(queueInfo.getQueueState().toString()));
}
Example #19
Source File: TestTypeConverter.java From hadoop with Apache License 2.0 | 5 votes |
@Test
public void testFromYarnQueueInfo() {
  org.apache.hadoop.yarn.api.records.QueueInfo queueInfo = Records
      .newRecord(org.apache.hadoop.yarn.api.records.QueueInfo.class);
  queueInfo.setQueueState(org.apache.hadoop.yarn.api.records.QueueState.STOPPED);
  org.apache.hadoop.mapreduce.QueueInfo returned =
      TypeConverter.fromYarn(queueInfo, new Configuration());
  Assert.assertEquals("queueInfo translation didn't work.",
      returned.getState().toString(),
      StringUtils.toLowerCase(queueInfo.getQueueState().toString()));
}
Example #20
Source File: QueueInfoPBImpl.java From hadoop with Apache License 2.0 | 5 votes |
@Override
public void setQueueState(QueueState queueState) {
  maybeInitBuilder();
  if (queueState == null) {
    builder.clearState();
    return;
  }
  builder.setState(convertToProtoFormat(queueState));
}
Example #21
Source File: TypeConverter.java From hadoop with Apache License 2.0 | 5 votes |
public static org.apache.hadoop.mapreduce.QueueState fromYarn(
    QueueState state) {
  org.apache.hadoop.mapreduce.QueueState qState =
      org.apache.hadoop.mapreduce.QueueState.getState(
          StringUtils.toLowerCase(state.toString()));
  return qState;
}
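Examples #17 and #21 show that TypeConverter.fromYarn simply lower-cases the YARN enum name and looks it up in org.apache.hadoop.mapreduce.QueueState. The following is a small hedged check of that mapping; it is a sketch written for this page (the test class name is hypothetical), not part of the original test suite, and it uses only the conversion shown above.

import org.apache.hadoop.mapreduce.TypeConverter;
import org.apache.hadoop.yarn.api.records.QueueState;
import org.junit.Assert;
import org.junit.Test;

public class QueueStateMappingSketch {

  @Test
  public void yarnStatesMapToLowerCaseMapReduceStates() {
    // RUNNING -> "running" and STOPPED -> "stopped", per fromYarn above.
    Assert.assertEquals(org.apache.hadoop.mapreduce.QueueState.RUNNING,
        TypeConverter.fromYarn(QueueState.RUNNING));
    Assert.assertEquals(org.apache.hadoop.mapreduce.QueueState.STOPPED,
        TypeConverter.fromYarn(QueueState.STOPPED));
    // The MapReduce enum exposes the lower-cased name via toString(),
    // which is what Examples #18 and #19 assert against.
    Assert.assertEquals("running",
        TypeConverter.fromYarn(QueueState.RUNNING).toString());
  }
}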
Example #22
Source File: QueueInfoPBImpl.java From hadoop with Apache License 2.0 | 5 votes |
@Override
public QueueState getQueueState() {
  QueueInfoProtoOrBuilder p = viaProto ? proto : builder;
  if (!p.hasState()) {
    return null;
  }
  return convertFromProtoFormat(p.getState());
}
Example #23
Source File: FSQueue.java From hadoop with Apache License 2.0 | 5 votes |
@Override
public QueueInfo getQueueInfo(boolean includeChildQueues, boolean recursive) {
  QueueInfo queueInfo = recordFactory.newRecordInstance(QueueInfo.class);
  queueInfo.setQueueName(getQueueName());

  if (scheduler.getClusterResource().getMemory() == 0) {
    queueInfo.setCapacity(0.0f);
  } else {
    queueInfo.setCapacity((float) getFairShare().getMemory() /
        scheduler.getClusterResource().getMemory());
  }

  if (getFairShare().getMemory() == 0) {
    queueInfo.setCurrentCapacity(0.0f);
  } else {
    queueInfo.setCurrentCapacity((float) getResourceUsage().getMemory() /
        getFairShare().getMemory());
  }

  ArrayList<QueueInfo> childQueueInfos = new ArrayList<QueueInfo>();
  if (includeChildQueues) {
    Collection<FSQueue> childQueues = getChildQueues();
    for (FSQueue child : childQueues) {
      childQueueInfos.add(child.getQueueInfo(recursive, recursive));
    }
  }
  queueInfo.setChildQueues(childQueueInfos);
  queueInfo.setQueueState(QueueState.RUNNING);
  return queueInfo;
}
Example #24
Source File: ParentQueue.java From hadoop with Apache License 2.0 | 5 votes |
@Override
public void submitApplication(ApplicationId applicationId, String user,
    String queue) throws AccessControlException {

  synchronized (this) {
    // Sanity check
    if (queue.equals(queueName)) {
      throw new AccessControlException("Cannot submit application " +
          "to non-leaf queue: " + queueName);
    }

    if (state != QueueState.RUNNING) {
      throw new AccessControlException("Queue " + getQueuePath() +
          " is STOPPED. Cannot accept submission of application: " +
          applicationId);
    }

    addApplication(applicationId, user);
  }

  // Inform the parent queue
  if (parent != null) {
    try {
      parent.submitApplication(applicationId, user, queue);
    } catch (AccessControlException ace) {
      LOG.info("Failed to submit application to parent-queue: " +
          parent.getQueuePath(), ace);
      removeApplication(applicationId, user);
      throw ace;
    }
  }
}
Example #25
Source File: ProtocolHATestBase.java From hadoop with Apache License 2.0 | 4 votes |
public QueueInfo createFakeQueueInfo() {
  return QueueInfo.newInstance("root", 100f, 100f, 50f, null,
      createFakeAppReports(), QueueState.RUNNING, null, null);
}
Example #26
Source File: QueueInfoPBImpl.java From hadoop with Apache License 2.0 | 4 votes |
private QueueState convertFromProtoFormat(QueueStateProto q) {
  return ProtoUtils.convertFromProtoFormat(q);
}
Example #27
Source File: QueueInfoPBImpl.java From hadoop with Apache License 2.0 | 4 votes |
private QueueStateProto convertToProtoFormat(QueueState queueState) {
  return ProtoUtils.convertToProtoFormat(queueState);
}
Example #28
Source File: ProtoUtils.java From hadoop with Apache License 2.0 | 4 votes |
public static QueueStateProto convertToProtoFormat(QueueState e) {
  return QueueStateProto.valueOf(QUEUE_STATE_PREFIX + e.name());
}
Example #29
Source File: ProtoUtils.java From hadoop with Apache License 2.0 | 4 votes |
public static QueueState convertFromProtoFormat(QueueStateProto e) {
  return QueueState.valueOf(e.name().replace(QUEUE_STATE_PREFIX, ""));
}
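Examples #28 and #29 show that the protobuf mapping only adds and strips a name prefix, so converting a QueueState to its proto form and back is lossless. Below is a hedged round-trip check written for this page; it assumes QueueStateProto lives in org.apache.hadoop.yarn.proto.YarnProtos and ProtoUtils in org.apache.hadoop.yarn.api.records.impl.pb, which are the usual locations but are not confirmed by the snippets above.

import org.apache.hadoop.yarn.api.records.QueueState;
import org.apache.hadoop.yarn.api.records.impl.pb.ProtoUtils;
import org.apache.hadoop.yarn.proto.YarnProtos.QueueStateProto;

public class QueueStateProtoRoundTrip {
  public static void main(String[] args) {
    // Round-trip every QueueState through the proto form and back; since the
    // conversion only prepends/strips the prefix, identity should hold.
    for (QueueState state : QueueState.values()) {
      QueueStateProto proto = ProtoUtils.convertToProtoFormat(state);
      QueueState back = ProtoUtils.convertFromProtoFormat(proto);
      if (back != state) {
        throw new AssertionError("Round trip failed for " + state);
      }
      System.out.println(state + " <-> " + proto);
    }
  }
}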
Example #30
Source File: ProtocolHATestBase.java From big-c with Apache License 2.0 | 4 votes |
public QueueInfo createFakeQueueInfo() {
  return QueueInfo.newInstance("root", 100f, 100f, 50f, null,
      createFakeAppReports(), QueueState.RUNNING, null, null);
}