Java Code Examples for org.apache.hadoop.yarn.util.resource.Resources#add()
The following examples show how to use org.apache.hadoop.yarn.util.resource.Resources#add().
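As a quick orientation before the project examples, here is a minimal, self-contained sketch of the call itself (not taken from any of the projects below): Resources.add(lhs, rhs) returns a new Resource holding the component-wise sum of its two arguments, leaving both unchanged. The class name and printed values are illustrative only.

import org.apache.hadoop.yarn.api.records.Resource;
import org.apache.hadoop.yarn.util.resource.Resources;

public class ResourcesAddSketch {
  public static void main(String[] args) {
    // Two resources: <memory in MB, vCores>
    Resource a = Resource.newInstance(1024, 2);
    Resource b = Resource.newInstance(2048, 1);

    // Resources.add returns a new Resource with the component-wise sum;
    // neither argument is modified (contrast with Resources.addTo, which
    // mutates its first argument).
    Resource sum = Resources.add(a, b);

    System.out.println(sum.getMemory());        // 3072
    System.out.println(sum.getVirtualCores());  // 3
  }
}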
Example 1
Source File: FSParentQueue.java From big-c with Apache License 2.0
@Override
public void updateDemand() {
  // Compute demand by iterating through apps in the queue
  // Limit demand to maxResources
  Resource maxRes = scheduler.getAllocationConfiguration()
      .getMaxResources(getName());
  demand = Resources.createResource(0);
  for (FSQueue childQueue : childQueues) {
    childQueue.updateDemand();
    Resource toAdd = childQueue.getDemand();
    if (LOG.isDebugEnabled()) {
      LOG.debug("Counting resource from " + childQueue.getName() + " " +
          toAdd + "; Total resource consumption for " + getName() +
          " now " + demand);
    }
    demand = Resources.add(demand, toAdd);
    demand = Resources.componentwiseMin(demand, maxRes);
    if (Resources.equals(demand, maxRes)) {
      break;
    }
  }
  if (LOG.isDebugEnabled()) {
    LOG.debug("The updated demand for " + getName() + " is " + demand +
        "; the max is " + maxRes);
  }
}
Example 2
Source File: FSLeafQueue.java From hadoop with Apache License 2.0
private void updateDemandForApp(FSAppAttempt sched, Resource maxRes) {
  sched.updateDemand();
  Resource toAdd = sched.getDemand();
  if (LOG.isDebugEnabled()) {
    LOG.debug("Counting resource from " + sched.getName() + " " + toAdd +
        "; Total resource consumption for " + getName() + " now " + demand);
  }
  demand = Resources.add(demand, toAdd);
  demand = Resources.componentwiseMin(demand, maxRes);
}
Example 3
Source File: RMContainerAllocator.java From big-c with Apache License 2.0
@Private
public Resource getResourceLimit() {
  Resource headRoom = getAvailableResources();
  if (headRoom == null) {
    headRoom = Resources.none();
  }
  Resource assignedMapResource =
      Resources.multiply(mapResourceRequest, assignedRequests.maps.size());
  Resource assignedReduceResource =
      Resources.multiply(reduceResourceRequest,
          assignedRequests.reduces.size());
  return Resources.add(headRoom,
      Resources.add(assignedMapResource, assignedReduceResource));
}
Example 4
Source File: ParentQueue.java From big-c with Apache License 2.0
private ResourceLimits getResourceLimitsOfChild(CSQueue child,
    Resource clusterResource, ResourceLimits parentLimits) {
  // Set resource-limit of a given child, child.limit =
  // min(my.limit - my.used + child.used, child.max)
  // Why? my.limit - my.used + child.used is what remains in the parent queue
  // after subtracting the resources used by the other children; those other
  // children may be using more than their own limits.

  // Parent available resource = parent-limit - parent-used-resource
  Resource parentMaxAvailableResource =
      Resources.subtract(parentLimits.getLimit(), getUsedResources());

  // Child's limit = parent-available-resource + child-used
  Resource childLimit =
      Resources.add(parentMaxAvailableResource, child.getUsedResources());

  // Get child's max resource
  Resource childConfiguredMaxResource =
      Resources.multiplyAndNormalizeDown(resourceCalculator, labelManager
          .getResourceByLabel(RMNodeLabelsManager.NO_LABEL, clusterResource),
          child.getAbsoluteMaximumCapacity(), minimumAllocation);

  // Child's limit should be capped by child configured max resource
  childLimit =
      Resources.min(resourceCalculator, clusterResource, childLimit,
          childConfiguredMaxResource);

  // Normalize before return
  childLimit =
      Resources.roundDown(resourceCalculator, childLimit, minimumAllocation);

  return new ResourceLimits(childLimit);
}
Example 5
Source File: FairScheduler.java From big-c with Apache License 2.0
static boolean fitsInMaxShare(FSQueue queue, Resource additionalResource) {
  Resource usagePlusAddition =
      Resources.add(queue.getResourceUsage(), additionalResource);

  if (!Resources.fitsIn(usagePlusAddition, queue.getMaxShare())) {
    return false;
  }

  FSQueue parentQueue = queue.getParent();
  if (parentQueue != null) {
    return fitsInMaxShare(parentQueue, additionalResource);
  }
  return true;
}
Example 6
Source File: FSLeafQueue.java From big-c with Apache License 2.0
/**
 * Check whether this queue can run this application master under the
 * maxAMShare limit
 *
 * @param amResource
 * @return true if this queue can run
 */
public boolean canRunAppAM(Resource amResource) {
  float maxAMShare =
      scheduler.getAllocationConfiguration().getQueueMaxAMShare(getName());
  if (Math.abs(maxAMShare - -1.0f) < 0.0001) {
    return true;
  }
  Resource maxAMResource = Resources.multiply(getFairShare(), maxAMShare);
  Resource ifRunAMResource = Resources.add(amResourceUsage, amResource);
  return !policy
      .checkIfAMResourceUsageOverLimit(ifRunAMResource, maxAMResource);
}
Example 7
Source File: FSLeafQueue.java From big-c with Apache License 2.0
private void updateDemandForApp(FSAppAttempt sched, Resource maxRes) {
  sched.updateDemand();
  Resource toAdd = sched.getDemand();
  if (LOG.isDebugEnabled()) {
    LOG.debug("Counting resource from " + sched.getName() + " " + toAdd +
        "; Total resource consumption for " + getName() + " now " + demand);
  }
  demand = Resources.add(demand, toAdd);
  demand = Resources.componentwiseMin(demand, maxRes);
}
Example 8
Source File: RMContainerAllocator.java From hadoop with Apache License 2.0
@Private
public Resource getResourceLimit() {
  Resource headRoom = getAvailableResources();
  if (headRoom == null) {
    headRoom = Resources.none();
  }
  Resource assignedMapResource =
      Resources.multiply(mapResourceRequest, assignedRequests.maps.size());
  Resource assignedReduceResource =
      Resources.multiply(reduceResourceRequest,
          assignedRequests.reduces.size());
  return Resources.add(headRoom,
      Resources.add(assignedMapResource, assignedReduceResource));
}
Example 9
Source File: ParentQueue.java From hadoop with Apache License 2.0
private ResourceLimits getResourceLimitsOfChild(CSQueue child,
    Resource clusterResource, ResourceLimits parentLimits) {
  // Set resource-limit of a given child, child.limit =
  // min(my.limit - my.used + child.used, child.max)

  // Parent available resource = parent-limit - parent-used-resource
  Resource parentMaxAvailableResource =
      Resources.subtract(parentLimits.getLimit(), getUsedResources());

  // Child's limit = parent-available-resource + child-used
  Resource childLimit =
      Resources.add(parentMaxAvailableResource, child.getUsedResources());

  // Get child's max resource
  Resource childConfiguredMaxResource =
      Resources.multiplyAndNormalizeDown(resourceCalculator, labelManager
          .getResourceByLabel(RMNodeLabelsManager.NO_LABEL, clusterResource),
          child.getAbsoluteMaximumCapacity(), minimumAllocation);

  // Child's limit should be capped by child configured max resource
  childLimit =
      Resources.min(resourceCalculator, clusterResource, childLimit,
          childConfiguredMaxResource);

  // Normalize before return
  childLimit =
      Resources.roundDown(resourceCalculator, childLimit, minimumAllocation);

  return new ResourceLimits(childLimit);
}
Example 10
Source File: FairScheduler.java From hadoop with Apache License 2.0
static boolean fitsInMaxShare(FSQueue queue, Resource additionalResource) {
  Resource usagePlusAddition =
      Resources.add(queue.getResourceUsage(), additionalResource);

  if (!Resources.fitsIn(usagePlusAddition, queue.getMaxShare())) {
    return false;
  }

  FSQueue parentQueue = queue.getParent();
  if (parentQueue != null) {
    return fitsInMaxShare(parentQueue, additionalResource);
  }
  return true;
}
Example 11
Source File: FSLeafQueue.java From hadoop with Apache License 2.0
/**
 * Check whether this queue can run this application master under the
 * maxAMShare limit
 *
 * @param amResource
 * @return true if this queue can run
 */
public boolean canRunAppAM(Resource amResource) {
  float maxAMShare =
      scheduler.getAllocationConfiguration().getQueueMaxAMShare(getName());
  if (Math.abs(maxAMShare - -1.0f) < 0.0001) {
    return true;
  }
  Resource maxAMResource = Resources.multiply(getFairShare(), maxAMShare);
  Resource ifRunAMResource = Resources.add(amResourceUsage, amResource);
  return !policy
      .checkIfAMResourceUsageOverLimit(ifRunAMResource, maxAMResource);
}
Example 12
Source File: LeafQueue.java From hadoop with Apache License 2.0
@Lock(NoLock.class)
private Resource computeUserLimit(FiCaSchedulerApp application,
    Resource clusterResource, Resource required, User user,
    Set<String> requestedLabels) {
  // What is our current capacity?
  // * It is equal to the max(required, queue-capacity) if
  //   we're running below capacity. The 'max' ensures that jobs in queues
  //   with miniscule capacity (< 1 slot) make progress
  // * If we're running over capacity, then its
  //   (usedResources + required) (which extra resources we are allocating)
  Resource queueCapacity = Resource.newInstance(0, 0, 0);
  if (requestedLabels != null && !requestedLabels.isEmpty()) {
    // if we have multiple labels to request, we will choose to use the first
    // label
    String firstLabel = requestedLabels.iterator().next();
    queueCapacity =
        Resources.max(resourceCalculator, clusterResource, queueCapacity,
            Resources.multiplyAndNormalizeUp(resourceCalculator,
                labelManager.getResourceByLabel(firstLabel, clusterResource),
                queueCapacities.getAbsoluteCapacity(firstLabel),
                minimumAllocation));
  } else {
    // else there's no label on request, just to use absolute capacity as
    // capacity for nodes without label
    queueCapacity =
        Resources.multiplyAndNormalizeUp(resourceCalculator, labelManager
            .getResourceByLabel(CommonNodeLabelsManager.NO_LABEL,
                clusterResource),
            queueCapacities.getAbsoluteCapacity(), minimumAllocation);
  }

  // Allow progress for queues with miniscule capacity
  queueCapacity =
      Resources.max(resourceCalculator, clusterResource, queueCapacity,
          required);

  Resource currentCapacity =
      Resources.lessThan(resourceCalculator, clusterResource,
          queueUsage.getUsed(), queueCapacity)
          ? queueCapacity : Resources.add(queueUsage.getUsed(), required);

  // Never allow a single user to take more than the
  // queue's configured capacity * user-limit-factor.
  // Also, the queue's configured capacity should be higher than
  // queue-hard-limit * ulMin
  final int activeUsers = activeUsersManager.getNumActiveUsers();

  Resource limit =
      Resources.roundUp(
          resourceCalculator,
          Resources.min(
              resourceCalculator, clusterResource,
              Resources.max(
                  resourceCalculator, clusterResource,
                  Resources.divideAndCeil(
                      resourceCalculator, currentCapacity, activeUsers),
                  Resources.divideAndCeil(
                      resourceCalculator,
                      Resources.multiplyAndRoundDown(currentCapacity,
                          userLimit),
                      100)),
              Resources.multiplyAndRoundDown(queueCapacity, userLimitFactor)),
          minimumAllocation);

  if (LOG.isDebugEnabled()) {
    String userName = application.getUser();
    LOG.debug("User limit computation for " + userName
        + " in queue " + getQueueName()
        + " userLimit=" + userLimit
        + " userLimitFactor=" + userLimitFactor
        + " required: " + required
        + " consumed: " + user.getUsed()
        + " limit: " + limit
        + " queueCapacity: " + queueCapacity
        + " qconsumed: " + queueUsage.getUsed()
        + " currentCapacity: " + currentCapacity
        + " activeUsers: " + activeUsers
        + " clusterCapacity: " + clusterResource);
  }
  user.setUserResourceLimit(limit);
  return limit;
}
Example 13
Source File: ProportionalCapacityPreemptionPolicy.java From hadoop with Apache License 2.0
/**
 * Given a set of queues compute the fix-point distribution of unassigned
 * resources among them. As pending request of a queue are exhausted, the
 * queue is removed from the set and remaining capacity redistributed among
 * remaining queues. The distribution is weighted based on guaranteed
 * capacity, unless asked to ignoreGuarantee, in which case resources are
 * distributed uniformly.
 */
private void computeFixpointAllocation(ResourceCalculator rc,
    Resource tot_guarant, Collection<TempQueue> qAlloc, Resource unassigned,
    boolean ignoreGuarantee) {
  // Prior to assigning the unused resources, process each queue as follows:
  // If current > guaranteed, idealAssigned = guaranteed + untouchable extra
  // Else idealAssigned = current;
  // Subtract idealAssigned resources from unassigned.
  // If the queue has all of its needs met (that is, if
  // idealAssigned >= current + pending), remove it from consideration.
  // Sort queues from most under-guaranteed to most over-guaranteed.
  TQComparator tqComparator = new TQComparator(rc, tot_guarant);
  PriorityQueue<TempQueue> orderedByNeed =
      new PriorityQueue<TempQueue>(10, tqComparator);
  for (Iterator<TempQueue> i = qAlloc.iterator(); i.hasNext();) {
    TempQueue q = i.next();
    if (Resources.greaterThan(rc, tot_guarant, q.current, q.guaranteed)) {
      q.idealAssigned = Resources.add(q.guaranteed, q.untouchableExtra);
    } else {
      q.idealAssigned = Resources.clone(q.current);
    }
    Resources.subtractFrom(unassigned, q.idealAssigned);
    // If idealAssigned < (current + pending), q needs more resources, so
    // add it to the list of underserved queues, ordered by need.
    Resource curPlusPend = Resources.add(q.current, q.pending);
    if (Resources.lessThan(rc, tot_guarant, q.idealAssigned, curPlusPend)) {
      orderedByNeed.add(q);
    }
  }

  // assign all cluster resources until no more demand, or no resources are
  // left
  while (!orderedByNeed.isEmpty()
      && Resources.greaterThan(rc, tot_guarant, unassigned,
          Resources.none())) {
    Resource wQassigned = Resource.newInstance(0, 0, 0);
    // we compute normalizedGuarantees capacity based on currently active
    // queues
    resetCapacity(rc, unassigned, orderedByNeed, ignoreGuarantee);

    // For each underserved queue (or set of queues if multiple are equally
    // underserved), offer its share of the unassigned resources based on its
    // normalized guarantee. After the offer, if the queue is not satisfied,
    // place it back in the ordered list of queues, recalculating its place
    // in the order of most under-guaranteed to most over-guaranteed. In this
    // way, the most underserved queue(s) are always given resources first.
    Collection<TempQueue> underserved =
        getMostUnderservedQueues(orderedByNeed, tqComparator);
    for (Iterator<TempQueue> i = underserved.iterator(); i.hasNext();) {
      TempQueue sub = i.next();
      Resource wQavail = Resources.multiplyAndNormalizeUp(rc,
          unassigned, sub.normalizedGuarantee, Resource.newInstance(1, 1, 0));
      Resource wQidle = sub.offer(wQavail, rc, tot_guarant);
      Resource wQdone = Resources.subtract(wQavail, wQidle);

      if (Resources.greaterThan(rc, tot_guarant, wQdone, Resources.none())) {
        // The queue is still asking for more. Put it back in the priority
        // queue, recalculating its order based on need.
        orderedByNeed.add(sub);
      }
      Resources.addTo(wQassigned, wQdone);
    }
    Resources.subtractFrom(unassigned, wQassigned);
  }
}
Example 14
Source File: LeafQueue.java From hadoop with Apache License 2.0
private synchronized void activateApplications() {
  // limit of allowed resource usage for application masters
  Map<String, Resource> amPartitionLimit = new HashMap<String, Resource>();
  Map<String, Resource> userAmPartitionLimit =
      new HashMap<String, Resource>();

  for (Iterator<FiCaSchedulerApp> i = pendingApplications.iterator();
      i.hasNext();) {
    FiCaSchedulerApp application = i.next();
    // Get the am-node-partition associated with each application
    // and calculate max-am resource limit for this partition.
    String partitionName = application.getAppAMNodePartitionName();

    Resource amLimit = amPartitionLimit.get(partitionName);
    // Verify whether we already calculated am-limit for this label.
    if (amLimit == null) {
      amLimit = getAMResourceLimitPerPartition(partitionName);
      amPartitionLimit.put(partitionName, amLimit);
    }
    // Check am resource limit.
    Resource amIfStarted = Resources.add(
        application.getAMResource(partitionName),
        queueUsage.getAMUsed(partitionName));

    if (LOG.isDebugEnabled()) {
      LOG.debug("application AMResource "
          + application.getAMResource(partitionName)
          + " maxAMResourcePerQueuePercent " + maxAMResourcePerQueuePercent
          + " amLimit " + amLimit
          + " lastClusterResource " + lastClusterResource
          + " amIfStarted " + amIfStarted
          + " AM node-partition name " + partitionName);
    }

    if (!Resources.lessThanOrEqual(resourceCalculator, lastClusterResource,
        amIfStarted, amLimit)) {
      if (getNumActiveApplications() < 1
          || (Resources.lessThanOrEqual(resourceCalculator,
              lastClusterResource, queueUsage.getAMUsed(partitionName),
              Resources.none()))) {
        LOG.warn("maximum-am-resource-percent is insufficient to start a"
            + " single application in queue, it is likely set too low."
            + " skipping enforcement to allow at least one application"
            + " to start");
      } else {
        LOG.info("not starting application as amIfStarted exceeds amLimit");
        continue;
      }
    }

    // Check user am resource limit
    User user = getUser(application.getUser());
    Resource userAMLimit = userAmPartitionLimit.get(partitionName);

    // Verify whether we already calculated user-am-limit for this label.
    if (userAMLimit == null) {
      userAMLimit = getUserAMResourceLimitPerPartition(partitionName);
      userAmPartitionLimit.put(partitionName, userAMLimit);
    }

    Resource userAmIfStarted = Resources.add(
        application.getAMResource(partitionName),
        user.getConsumedAMResources(partitionName));

    if (!Resources.lessThanOrEqual(resourceCalculator, lastClusterResource,
        userAmIfStarted, userAMLimit)) {
      if (getNumActiveApplications() < 1
          || (Resources.lessThanOrEqual(resourceCalculator,
              lastClusterResource, queueUsage.getAMUsed(partitionName),
              Resources.none()))) {
        LOG.warn("maximum-am-resource-percent is insufficient to start a"
            + " single application in queue for user, it is likely set too"
            + " low. skipping enforcement to allow at least one application"
            + " to start");
      } else {
        LOG.info("not starting application as amIfStarted exceeds "
            + "userAmLimit");
        continue;
      }
    }
    user.activateApplication();
    activeApplications.add(application);
    queueUsage.incAMUsed(partitionName,
        application.getAMResource(partitionName));
    user.getResourceUsage().incAMUsed(partitionName,
        application.getAMResource(partitionName));
    i.remove();
    LOG.info("Application " + application.getApplicationId()
        + " from user: " + application.getUser()
        + " activated in queue: " + getQueueName());
  }
}
Example 15
Source File: RLESparseResourceAllocation.java From big-c with Apache License 2.0
/**
 * Add a resource for the specified interval
 *
 * @param reservationInterval the interval for which the resource is to be
 *          added
 * @param capacity the resource to be added
 * @return true if addition is successful, false otherwise
 */
public boolean addInterval(ReservationInterval reservationInterval,
    ReservationRequest capacity) {
  Resource totCap =
      Resources.multiply(capacity.getCapability(),
          (float) capacity.getNumContainers());
  if (totCap.equals(ZERO_RESOURCE)) {
    return true;
  }
  writeLock.lock();
  try {
    long startKey = reservationInterval.getStartTime();
    long endKey = reservationInterval.getEndTime();
    NavigableMap<Long, Resource> ticks =
        cumulativeCapacity.headMap(endKey, false);
    if (ticks != null && !ticks.isEmpty()) {
      Resource updatedCapacity = Resource.newInstance(0, 0);
      Entry<Long, Resource> lowEntry = ticks.floorEntry(startKey);
      if (lowEntry == null) {
        // This is the earliest starting interval
        cumulativeCapacity.put(startKey, totCap);
      } else {
        updatedCapacity = Resources.add(lowEntry.getValue(), totCap);
        // Add a new tick only if the updated value is different
        // from the previous tick
        if ((startKey == lowEntry.getKey())
            && (isSameAsPrevious(lowEntry.getKey(), updatedCapacity))) {
          cumulativeCapacity.remove(lowEntry.getKey());
        } else {
          cumulativeCapacity.put(startKey, updatedCapacity);
        }
      }
      // Increase all the capacities of overlapping intervals
      Set<Entry<Long, Resource>> overlapSet =
          ticks.tailMap(startKey, false).entrySet();
      for (Entry<Long, Resource> entry : overlapSet) {
        updatedCapacity = Resources.add(entry.getValue(), totCap);
        entry.setValue(updatedCapacity);
      }
    } else {
      // This is the first interval to be added
      cumulativeCapacity.put(startKey, totCap);
    }
    Resource nextTick = cumulativeCapacity.get(endKey);
    if (nextTick != null) {
      // If there is overlap, remove the duplicate entry
      if (isSameAsPrevious(endKey, nextTick)) {
        cumulativeCapacity.remove(endKey);
      }
    } else {
      // Decrease capacity as this is end of the interval
      cumulativeCapacity.put(endKey, Resources.subtract(cumulativeCapacity
          .floorEntry(endKey).getValue(), totCap));
    }
    return true;
  } finally {
    writeLock.unlock();
  }
}
Example 16
Source File: AbstractCSQueue.java From hadoop with Apache License 2.0
synchronized boolean canAssignToThisQueue(Resource clusterResource,
    Set<String> nodeLabels, ResourceLimits currentResourceLimits,
    Resource nowRequired, Resource resourceCouldBeUnreserved) {
  // Get label of this queue can access, it's (nodeLabel AND queueLabel)
  Set<String> labelCanAccess;
  if (null == nodeLabels || nodeLabels.isEmpty()) {
    labelCanAccess = new HashSet<String>();
    // Any queue can always access any node without label
    labelCanAccess.add(RMNodeLabelsManager.NO_LABEL);
  } else {
    labelCanAccess = new HashSet<String>(
        accessibleLabels.contains(CommonNodeLabelsManager.ANY) ? nodeLabels
            : Sets.intersection(accessibleLabels, nodeLabels));
  }

  for (String label : labelCanAccess) {
    // New total resource = used + required
    Resource newTotalResource =
        Resources.add(queueUsage.getUsed(label), nowRequired);

    Resource currentLimitResource =
        getCurrentLimitResource(label, clusterResource,
            currentResourceLimits);

    if (Resources.greaterThan(resourceCalculator, clusterResource,
        newTotalResource, currentLimitResource)) {

      // if reservation continous looking enabled, check to see if could we
      // potentially use this node instead of a reserved node if the
      // application has reserved containers.
      // TODO, now only consider reservation cases when the node has no label
      if (this.reservationsContinueLooking
          && label.equals(RMNodeLabelsManager.NO_LABEL)
          && Resources.greaterThan(resourceCalculator, clusterResource,
              resourceCouldBeUnreserved, Resources.none())) {
        // resource-without-reserved = used - reserved
        Resource newTotalWithoutReservedResource =
            Resources.subtract(newTotalResource, resourceCouldBeUnreserved);

        // when total-used-without-reserved-resource < currentLimit, we still
        // have chance to allocate on this node by unreserving some containers
        if (Resources.lessThan(resourceCalculator, clusterResource,
            newTotalWithoutReservedResource, currentLimitResource)) {
          if (LOG.isDebugEnabled()) {
            LOG.debug("try to use reserved: " + getQueueName()
                + " usedResources: " + queueUsage.getUsed()
                + ", clusterResources: " + clusterResource
                + ", reservedResources: " + resourceCouldBeUnreserved
                + ", capacity-without-reserved: "
                + newTotalWithoutReservedResource
                + ", maxLimitCapacity: " + currentLimitResource);
          }
          currentResourceLimits.setAmountNeededUnreserve(
              Resources.subtract(newTotalResource, currentLimitResource));
          return true;
        }
      }
      if (LOG.isDebugEnabled()) {
        LOG.debug(getQueueName()
            + "Check assign to queue, label=" + label
            + " usedResources: " + queueUsage.getUsed(label)
            + " clusterResources: " + clusterResource
            + " currentUsedCapacity "
            + Resources.divide(resourceCalculator, clusterResource,
                queueUsage.getUsed(label),
                labelManager.getResourceByLabel(label, clusterResource))
            + " max-capacity: "
            + queueCapacities.getAbsoluteMaximumCapacity(label) + ")");
      }
      return false;
    }
    return true;
  }

  // Actually, this will not happen, since labelCanAccess will be always
  // non-empty
  return false;
}
Example 17
Source File: YarnNodeCapacityManager.java From incubator-myriad with Apache License 2.0
/**
 * Checks if any containers were allocated in the current scheduler run and
 * launches the corresponding Mesos tasks. It also updates the node
 * capacity depending on what portion of the consumed offers were actually
 * used.
 */
@VisibleForTesting
protected void handleContainerAllocation(RMNode rmNode) {
  String host = rmNode.getNodeID().getHost();

  ConsumedOffer consumedOffer = offerLifecycleMgr.drainConsumedOffer(host);
  if (consumedOffer == null) {
    LOGGER.debug("No offer consumed for {}", host);
    return;
  }

  Node node = nodeStore.getNode(host);
  Set<RMContainer> containersBeforeSched = node.getContainerSnapshot();
  Set<RMContainer> containersAfterSched =
      new HashSet<>(node.getNode().getRunningContainers());

  Set<RMContainer> containersAllocatedByMesosOffer =
      (containersBeforeSched == null) ? containersAfterSched
          : Sets.difference(containersAfterSched, containersBeforeSched);

  if (containersAllocatedByMesosOffer.isEmpty()) {
    LOGGER.debug("No containers allocated using Mesos offers for host: {}",
        host);
    for (Protos.Offer offer : consumedOffer.getOffers()) {
      offerLifecycleMgr.declineOffer(offer);
    }
    decrementNodeCapacity(rmNode,
        OfferUtils.getYarnResourcesFromMesosOffers(consumedOffer.getOffers()));
  } else {
    LOGGER.debug("Containers allocated using Mesos offers for host: {} count: {}",
        host, containersAllocatedByMesosOffer.size());

    // Identify the Mesos tasks that need to be launched
    List<Protos.TaskInfo> tasks = Lists.newArrayList();
    Resource resUsed = Resource.newInstance(0, 0);
    for (RMContainer newContainer : containersAllocatedByMesosOffer) {
      tasks.add(getTaskInfoForContainer(newContainer, consumedOffer, node));
      resUsed = Resources.add(resUsed, newContainer.getAllocatedResource());
    }

    // Reduce node capacity to account for unused offers
    Resource resOffered =
        OfferUtils.getYarnResourcesFromMesosOffers(consumedOffer.getOffers());
    Resource resUnused = Resources.subtract(resOffered, resUsed);
    decrementNodeCapacity(rmNode, resUnused);
    myriadDriver.getDriver().launchTasks(consumedOffer.getOfferIds(), tasks);
  }

  // No need to hold on to the snapshot anymore
  node.removeContainerSnapshot();
}
Example 18
Source File: AbstractCSQueue.java From big-c with Apache License 2.0
synchronized boolean canAssignToThisQueue(Resource clusterResource,
    Set<String> nodeLabels, ResourceLimits currentResourceLimits,
    Resource nowRequired, Resource resourceCouldBeUnreserved) {
  // Get label of this queue can access, it's (nodeLabel AND queueLabel)
  Set<String> labelCanAccess;
  if (null == nodeLabels || nodeLabels.isEmpty()) {
    labelCanAccess = new HashSet<String>();
    // Any queue can always access any node without label
    labelCanAccess.add(RMNodeLabelsManager.NO_LABEL);
  } else {
    labelCanAccess = new HashSet<String>(
        accessibleLabels.contains(CommonNodeLabelsManager.ANY) ? nodeLabels
            : Sets.intersection(accessibleLabels, nodeLabels));
  }

  for (String label : labelCanAccess) {
    // New total resource = used + required
    Resource newTotalResource =
        Resources.add(queueUsage.getUsed(label), nowRequired);

    Resource currentLimitResource =
        getCurrentLimitResource(label, clusterResource,
            currentResourceLimits);

    // the new total resource would exceed the current limit
    if (Resources.greaterThan(resourceCalculator, clusterResource,
        newTotalResource, currentLimitResource)) {

      // if reservation continous looking enabled, check to see if could we
      // potentially use this node instead of a reserved node if the
      // application has reserved containers.
      // TODO, now only consider reservation cases when the node has no label
      if (this.reservationsContinueLooking
          && label.equals(RMNodeLabelsManager.NO_LABEL)
          && Resources.greaterThan(resourceCalculator, clusterResource,
              resourceCouldBeUnreserved, Resources.none())) {
        // resource-without-reserved = used - reserved
        Resource newTotalWithoutReservedResource =
            Resources.subtract(newTotalResource, resourceCouldBeUnreserved);

        // when total-used-without-reserved-resource < currentLimit, we still
        // have chance to allocate on this node by unreserving some containers
        if (Resources.lessThan(resourceCalculator, clusterResource,
            newTotalWithoutReservedResource, currentLimitResource)) {
          if (LOG.isDebugEnabled()) {
            LOG.debug("try to use reserved: " + getQueueName()
                + " usedResources: " + queueUsage.getUsed()
                + ", clusterResources: " + clusterResource
                + ", reservedResources: " + resourceCouldBeUnreserved
                + ", capacity-without-reserved: "
                + newTotalWithoutReservedResource
                + ", maxLimitCapacity: " + currentLimitResource);
          }
          currentResourceLimits.setAmountNeededUnreserve(
              Resources.subtract(newTotalResource, currentLimitResource));
          return true;
        }
      }
      if (LOG.isDebugEnabled()) {
        LOG.debug(getQueueName()
            + "Check assign to queue, label=" + label
            + " usedResources: " + queueUsage.getUsed(label)
            + " clusterResources: " + clusterResource
            + " currentUsedCapacity "
            + Resources.divide(resourceCalculator, clusterResource,
                queueUsage.getUsed(label),
                labelManager.getResourceByLabel(label, clusterResource))
            + " max-capacity: "
            + queueCapacities.getAbsoluteMaximumCapacity(label) + ")");
      }
      return false;
    }
    return true;
  }

  // Actually, this will not happen, since labelCanAccess will be always
  // non-empty
  return false;
}
Example 19
Source File: RLESparseResourceAllocation.java From hadoop with Apache License 2.0
/**
 * Add a resource for the specified interval
 *
 * @param reservationInterval the interval for which the resource is to be
 *          added
 * @param capacity the resource to be added
 * @return true if addition is successful, false otherwise
 */
public boolean addInterval(ReservationInterval reservationInterval,
    ReservationRequest capacity) {
  Resource totCap =
      Resources.multiply(capacity.getCapability(),
          (float) capacity.getNumContainers());
  if (totCap.equals(ZERO_RESOURCE)) {
    return true;
  }
  writeLock.lock();
  try {
    long startKey = reservationInterval.getStartTime();
    long endKey = reservationInterval.getEndTime();
    NavigableMap<Long, Resource> ticks =
        cumulativeCapacity.headMap(endKey, false);
    if (ticks != null && !ticks.isEmpty()) {
      Resource updatedCapacity = Resource.newInstance(0, 0, 0);
      Entry<Long, Resource> lowEntry = ticks.floorEntry(startKey);
      if (lowEntry == null) {
        // This is the earliest starting interval
        cumulativeCapacity.put(startKey, totCap);
      } else {
        updatedCapacity = Resources.add(lowEntry.getValue(), totCap);
        // Add a new tick only if the updated value is different
        // from the previous tick
        if ((startKey == lowEntry.getKey())
            && (isSameAsPrevious(lowEntry.getKey(), updatedCapacity))) {
          cumulativeCapacity.remove(lowEntry.getKey());
        } else {
          cumulativeCapacity.put(startKey, updatedCapacity);
        }
      }
      // Increase all the capacities of overlapping intervals
      Set<Entry<Long, Resource>> overlapSet =
          ticks.tailMap(startKey, false).entrySet();
      for (Entry<Long, Resource> entry : overlapSet) {
        updatedCapacity = Resources.add(entry.getValue(), totCap);
        entry.setValue(updatedCapacity);
      }
    } else {
      // This is the first interval to be added
      cumulativeCapacity.put(startKey, totCap);
    }
    Resource nextTick = cumulativeCapacity.get(endKey);
    if (nextTick != null) {
      // If there is overlap, remove the duplicate entry
      if (isSameAsPrevious(endKey, nextTick)) {
        cumulativeCapacity.remove(endKey);
      }
    } else {
      // Decrease capacity as this is end of the interval
      cumulativeCapacity.put(endKey, Resources.subtract(cumulativeCapacity
          .floorEntry(endKey).getValue(), totCap));
    }
    return true;
  } finally {
    writeLock.unlock();
  }
}
Example 20
Source File: LeafQueue.java From big-c with Apache License 2.0
@Lock(NoLock.class)
private Resource computeUserLimit(FiCaSchedulerApp application,
    Resource clusterResource, Resource required, User user,
    Set<String> requestedLabels) {
  // What is our current capacity?
  // * It is equal to the max(required, queue-capacity) if
  //   we're running below capacity. The 'max' ensures that jobs in queues
  //   with miniscule capacity (< 1 slot) make progress
  // * If we're running over capacity, then its
  //   (usedResources + required) (which extra resources we are allocating)
  Resource queueCapacity = Resource.newInstance(0, 0);
  if (requestedLabels != null && !requestedLabels.isEmpty()) {
    // if we have multiple labels to request, we will choose to use the first
    // label
    String firstLabel = requestedLabels.iterator().next();
    queueCapacity =
        Resources.max(resourceCalculator, clusterResource, queueCapacity,
            Resources.multiplyAndNormalizeUp(resourceCalculator,
                labelManager.getResourceByLabel(firstLabel, clusterResource),
                queueCapacities.getAbsoluteCapacity(firstLabel),
                minimumAllocation));
  } else {
    // else there's no label on request, just to use absolute capacity as
    // capacity for nodes without label
    queueCapacity =
        Resources.multiplyAndNormalizeUp(resourceCalculator, labelManager
            .getResourceByLabel(CommonNodeLabelsManager.NO_LABEL,
                clusterResource),
            queueCapacities.getAbsoluteCapacity(), minimumAllocation);
  }

  // Allow progress for queues with miniscule capacity: this queue can
  // allocate at least one container each round, and may slightly exceed the
  // queue's limit.
  queueCapacity =
      Resources.max(resourceCalculator, clusterResource, queueCapacity,
          required);

  Resource currentCapacity =
      Resources.lessThan(resourceCalculator, clusterResource,
          queueUsage.getUsed(), queueCapacity)
          ? queueCapacity : Resources.add(queueUsage.getUsed(), required);

  // Never allow a single user to take more than the
  // queue's configured capacity * user-limit-factor.
  // Also, the queue's configured capacity should be higher than
  // queue-hard-limit * ulMin
  final int activeUsers = activeUsersManager.getNumActiveUsers();

  // UserCapacity_limit = currentCapacity * userLimit
  //   (a user's share cannot be less than this when multiple users share the
  //   cluster; if there are so many users that each share falls below this
  //   value, newly submitted jobs are queued instead of being allocated
  //   resources)
  // UserCapacity_Average = currentCapacity / activeUsers
  //   (averaged out when multiple users submit)
  // UserCapacity = max{UserCapacity_limit, UserCapacity_Average}
  // FactorCapacity = queueCapacity * userLimitFactor
  //   (lets a single user take more than one queue's worth of capacity,
  //   default 1; more can be allocated when the cluster is idle)
  // Final_UserCapacity = min{UserCapacity, FactorCapacity}
  Resource limit =
      Resources.roundUp(
          resourceCalculator,
          Resources.min(
              resourceCalculator, clusterResource,
              Resources.max(
                  resourceCalculator, clusterResource,
                  Resources.divideAndCeil(
                      resourceCalculator, currentCapacity, activeUsers),
                  Resources.divideAndCeil(
                      resourceCalculator,
                      Resources.multiplyAndRoundDown(currentCapacity,
                          userLimit),
                      100)),
              Resources.multiplyAndRoundDown(queueCapacity, userLimitFactor)),
          minimumAllocation);

  String userName = application.getUser();
  /*
  LOG.info("User limit computation for " + userName
      + " in queue " + getQueueName()
      + " userLimit=" + userLimit
      + " userLimitFactor=" + userLimitFactor
      + " required: " + required
      + " consumed: " + user.getUsed()
      + " limit: " + limit
      + " queueCapacity: " + queueCapacity
      + " qconsumed: " + queueUsage.getUsed()
      + " currentCapacity: " + currentCapacity
      + " activeUsers: " + activeUsers
      + " clusterCapacity: " + clusterResource);
  */
  user.setUserResourceLimit(limit);
  return limit;
}