Java Code Examples for org.apache.hadoop.yarn.util.resource.Resources#multiplyAndNormalizeUp()
The following examples show how to use
org.apache.hadoop.yarn.util.resource.Resources#multiplyAndNormalizeUp() .
Each example notes the original project, source file, and license it was taken from.
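Before the examples, here is a minimal, self-contained sketch (not taken from any of the projects below) of what the call computes: multiplyAndNormalizeUp(calculator, resource, factor, stepFactor) multiplies the resource by the factor and rounds each dimension up to the next multiple of stepFactor. The class name MultiplyAndNormalizeUpDemo and all numeric values are invented for illustration.

import org.apache.hadoop.yarn.api.records.Resource;
import org.apache.hadoop.yarn.util.resource.DefaultResourceCalculator;
import org.apache.hadoop.yarn.util.resource.ResourceCalculator;
import org.apache.hadoop.yarn.util.resource.Resources;

public class MultiplyAndNormalizeUpDemo {
  public static void main(String[] args) {
    // DefaultResourceCalculator only considers memory; DominantResourceCalculator
    // would also normalize vcores.
    ResourceCalculator rc = new DefaultResourceCalculator();
    Resource cluster = Resource.newInstance(10240, 16);  // 10 GB, 16 vcores
    Resource minAlloc = Resource.newInstance(1024, 1);   // step factor

    // 25% of 10240 MB is 2560 MB, which is not a multiple of 1024 MB,
    // so the result is rounded *up* to 3072 MB.
    Resource share = Resources.multiplyAndNormalizeUp(rc, cluster, 0.25, minAlloc);
    System.out.println(share);
  }
}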
Example 1
Source File: LeafQueue.java From hadoop with Apache License 2.0
public synchronized Resource getUserAMResourceLimitPerPartition(
    String nodePartition) {
  /*
   * The user am resource limit is based on the same approach as the user
   * limit (as it should represent a subset of that). This means that it uses
   * the absolute queue capacity (per partition) instead of the max and is
   * modified by the userlimit and the userlimit factor as is the userlimit
   */
  float effectiveUserLimit = Math.max(userLimit / 100.0f,
      1.0f / Math.max(getActiveUsersManager().getNumActiveUsers(), 1));

  Resource queuePartitionResource = Resources.multiplyAndNormalizeUp(
      resourceCalculator,
      labelManager.getResourceByLabel(nodePartition, lastClusterResource),
      queueCapacities.getAbsoluteCapacity(nodePartition), minimumAllocation);

  return Resources.multiplyAndNormalizeUp(resourceCalculator,
      queuePartitionResource,
      queueCapacities.getMaxAMResourcePercentage(nodePartition)
          * effectiveUserLimit * userLimitFactor, minimumAllocation);
}
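To make the two chained multiplyAndNormalizeUp calls concrete, suppose (values invented for illustration) the node partition holds 102400 MB, the queue's absolute capacity on that partition is 0.5, minimumAllocation is 1024 MB, userLimit is 25, there are 10 active users, the per-partition AM percentage is 0.1, and userLimitFactor is 1. Then queuePartitionResource is 51200 MB, effectiveUserLimit is max(0.25, 0.1) = 0.25, and the returned limit is 51200 MB × 0.1 × 0.25 × 1 = 1280 MB, normalized up to the next 1024 MB multiple, i.e. 2048 MB.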
Example 2
Source File: LeafQueue.java From big-c with Apache License 2.0
public synchronized Resource getUserAMResourceLimit() {
  /*
   * The user amresource limit is based on the same approach as the
   * user limit (as it should represent a subset of that). This means that
   * it uses the absolute queue capacity instead of the max and is modified
   * by the userlimit and the userlimit factor as is the userlimit
   */
  float effectiveUserLimit = Math.max(userLimit / 100.0f,
      1.0f / Math.max(getActiveUsersManager().getNumActiveUsers(), 1));
  return Resources.multiplyAndNormalizeUp(
      resourceCalculator,
      absoluteCapacityResource,
      maxAMResourcePerQueuePercent * effectiveUserLimit * userLimitFactor,
      minimumAllocation);
}
Example 3
Source File: LeafQueue.java From hadoop with Apache License 2.0
public synchronized Resource getAMResourceLimitPerPartition(
    String nodePartition) {
  /*
   * For non-labeled partition, get the max value from resources currently
   * available to the queue and the absolute resources guaranteed for the
   * partition in the queue. For labeled partition, consider only the absolute
   * resources guaranteed. Multiply this value (based on labeled/non-labeled),
   * with per-partition am-resource-percent to get the max am resource limit
   * for this queue and partition.
   */
  Resource queuePartitionResource = Resources.multiplyAndNormalizeUp(
      resourceCalculator,
      labelManager.getResourceByLabel(nodePartition, lastClusterResource),
      queueCapacities.getAbsoluteCapacity(nodePartition), minimumAllocation);

  Resource queueCurrentLimit = Resources.none();
  // For non-labeled partition, we need to consider the current queue
  // usage limit.
  if (nodePartition.equals(RMNodeLabelsManager.NO_LABEL)) {
    synchronized (queueResourceLimitsInfo) {
      queueCurrentLimit = queueResourceLimitsInfo.getQueueCurrentLimit();
    }
  }

  float amResourcePercent = queueCapacities
      .getMaxAMResourcePercentage(nodePartition);

  // Current usable resource for this queue and partition is the max of
  // queueCurrentLimit and queuePartitionResource.
  Resource queuePartitionUsableResource = Resources.max(resourceCalculator,
      lastClusterResource, queueCurrentLimit, queuePartitionResource);
  Resource amResouceLimit = Resources.multiplyAndNormalizeUp(
      resourceCalculator, queuePartitionUsableResource, amResourcePercent,
      minimumAllocation);

  //YARN-3494
  //metrics.setAMResouceLimit(amResouceLimit);
  return amResouceLimit;
}
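A rough worked example (numbers invented): if the default partition holds 102400 MB, the queue's absolute capacity on it is 0.4, and minimumAllocation is 1024 MB, then queuePartitionResource is 40960 MB. If other queues are idle and queueCurrentLimit has grown to 61440 MB, the usable resource is max(61440, 40960) = 61440 MB, and with an AM percentage of 0.1 the AM limit is 6144 MB. For a labeled partition, queueCurrentLimit stays at Resources.none(), so only the guaranteed 40960 MB × 0.1 = 4096 MB is used.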
Example 4
Source File: LeafQueue.java From big-c with Apache License 2.0
public synchronized Resource getAMResourceLimit() {
  /*
   * The limit to the amount of resources which can be consumed by
   * application masters for applications running in the queue
   * is calculated by taking the greater of the max resources currently
   * available to the queue (see absoluteMaxAvailCapacity) and the absolute
   * resources guaranteed for the queue and multiplying it by the am
   * resource percent.
   *
   * This is to allow a queue to grow its (proportional) application
   * master resource use up to its max capacity when other queues are
   * idle but to scale back down to its guaranteed capacity as they
   * become busy.
   */
  Resource queueCurrentLimit;
  synchronized (queueResourceLimitsInfo) {
    queueCurrentLimit = queueResourceLimitsInfo.getQueueCurrentLimit();
  }
  // The maximum resource limit for this queue.
  Resource queueCap = Resources.max(resourceCalculator, lastClusterResource,
      absoluteCapacityResource, queueCurrentLimit);
  return Resources.multiplyAndNormalizeUp(resourceCalculator, queueCap,
      maxAMResourcePerQueuePercent, minimumAllocation);
}
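The max() here is what lets the AM share breathe with cluster load (numbers invented for illustration): with absoluteCapacityResource of 40960 MB and maxAMResourcePerQueuePercent of 0.1, a queueCurrentLimit of 81920 MB on an idle cluster yields an AM limit of 8192 MB, while a busy cluster where queueCurrentLimit shrinks back to the guaranteed 40960 MB yields 4096 MB. With a 1024 MB minimumAllocation, both values are already aligned, so the normalize-up step leaves them unchanged.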
Example 5
Source File: LeafQueue.java From hadoop with Apache License 2.0
@Lock(NoLock.class)
private Resource computeUserLimit(FiCaSchedulerApp application,
    Resource clusterResource, Resource required, User user,
    Set<String> requestedLabels) {
  // What is our current capacity?
  // * It is equal to the max(required, queue-capacity) if
  //   we're running below capacity. The 'max' ensures that jobs in queues
  //   with miniscule capacity (< 1 slot) make progress
  // * If we're running over capacity, then its
  //   (usedResources + required) (which extra resources we are allocating)
  Resource queueCapacity = Resource.newInstance(0, 0, 0);
  if (requestedLabels != null && !requestedLabels.isEmpty()) {
    // if we have multiple labels to request, we will choose to use the first
    // label
    String firstLabel = requestedLabels.iterator().next();
    queueCapacity =
        Resources.max(resourceCalculator, clusterResource, queueCapacity,
            Resources.multiplyAndNormalizeUp(resourceCalculator,
                labelManager.getResourceByLabel(firstLabel, clusterResource),
                queueCapacities.getAbsoluteCapacity(firstLabel),
                minimumAllocation));
  } else {
    // else there's no label on request, just to use absolute capacity as
    // capacity for nodes without label
    queueCapacity =
        Resources.multiplyAndNormalizeUp(resourceCalculator, labelManager
            .getResourceByLabel(CommonNodeLabelsManager.NO_LABEL,
                clusterResource),
            queueCapacities.getAbsoluteCapacity(), minimumAllocation);
  }

  // Allow progress for queues with miniscule capacity
  queueCapacity =
      Resources.max(resourceCalculator, clusterResource, queueCapacity,
          required);

  Resource currentCapacity =
      Resources.lessThan(resourceCalculator, clusterResource,
          queueUsage.getUsed(), queueCapacity) ? queueCapacity
          : Resources.add(queueUsage.getUsed(), required);

  // Never allow a single user to take more than the
  // queue's configured capacity * user-limit-factor.
  // Also, the queue's configured capacity should be higher than
  // queue-hard-limit * ulMin

  final int activeUsers = activeUsersManager.getNumActiveUsers();

  Resource limit =
      Resources.roundUp(
          resourceCalculator,
          Resources.min(
              resourceCalculator, clusterResource,
              Resources.max(
                  resourceCalculator, clusterResource,
                  Resources.divideAndCeil(
                      resourceCalculator, currentCapacity, activeUsers),
                  Resources.divideAndCeil(
                      resourceCalculator,
                      Resources.multiplyAndRoundDown(
                          currentCapacity, userLimit),
                      100)
                  ),
              Resources.multiplyAndRoundDown(queueCapacity, userLimitFactor)
              ),
          minimumAllocation);

  if (LOG.isDebugEnabled()) {
    String userName = application.getUser();
    LOG.debug("User limit computation for " + userName +
        " in queue " + getQueueName() +
        " userLimit=" + userLimit +
        " userLimitFactor=" + userLimitFactor +
        " required: " + required +
        " consumed: " + user.getUsed() +
        " limit: " + limit +
        " queueCapacity: " + queueCapacity +
        " qconsumed: " + queueUsage.getUsed() +
        " currentCapacity: " + currentCapacity +
        " activeUsers: " + activeUsers +
        " clusterCapacity: " + clusterResource
    );
  }
  user.setUserResourceLimit(limit);
  return limit;
}
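A worked pass through the limit expression, with invented numbers: queueCapacity = 40960 MB, used = 36864 MB, required = 4096 MB, so used < queueCapacity and currentCapacity = 40960 MB. With 3 active users, divideAndCeil(currentCapacity, 3) gives 13654 MB; with userLimit = 25, divideAndCeil(multiplyAndRoundDown(40960, 25), 100) gives 10240 MB; the max of the two is 13654 MB. With userLimitFactor = 1 the cap is 40960 MB, so the min is 13654 MB, and rounding up to a 1024 MB minimumAllocation yields a per-user limit of 14336 MB.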
Example 6
Source File: LeafQueue.java From hadoop with Apache License 2.0
private void updateAbsoluteCapacityResource(Resource clusterResource) {
  absoluteCapacityResource =
      Resources.multiplyAndNormalizeUp(resourceCalculator, clusterResource,
          queueCapacities.getAbsoluteCapacity(), minimumAllocation);
}
Example 7
Source File: ProportionalCapacityPreemptionPolicy.java From hadoop with Apache License 2.0
/**
 * Given a set of queues compute the fix-point distribution of unassigned
 * resources among them. As pending request of a queue are exhausted, the
 * queue is removed from the set and remaining capacity redistributed among
 * remaining queues. The distribution is weighted based on guaranteed
 * capacity, unless asked to ignoreGuarantee, in which case resources are
 * distributed uniformly.
 */
private void computeFixpointAllocation(ResourceCalculator rc,
    Resource tot_guarant, Collection<TempQueue> qAlloc, Resource unassigned,
    boolean ignoreGuarantee) {
  // Prior to assigning the unused resources, process each queue as follows:
  // If current > guaranteed, idealAssigned = guaranteed + untouchable extra
  // Else idealAssigned = current;
  // Subtract idealAssigned resources from unassigned.
  // If the queue has all of its needs met (that is, if
  // idealAssigned >= current + pending), remove it from consideration.
  // Sort queues from most under-guaranteed to most over-guaranteed.
  TQComparator tqComparator = new TQComparator(rc, tot_guarant);
  PriorityQueue<TempQueue> orderedByNeed =
      new PriorityQueue<TempQueue>(10, tqComparator);
  for (Iterator<TempQueue> i = qAlloc.iterator(); i.hasNext();) {
    TempQueue q = i.next();
    if (Resources.greaterThan(rc, tot_guarant, q.current, q.guaranteed)) {
      q.idealAssigned = Resources.add(q.guaranteed, q.untouchableExtra);
    } else {
      q.idealAssigned = Resources.clone(q.current);
    }
    Resources.subtractFrom(unassigned, q.idealAssigned);
    // If idealAssigned < (current + pending), q needs more resources, so
    // add it to the list of underserved queues, ordered by need.
    Resource curPlusPend = Resources.add(q.current, q.pending);
    if (Resources.lessThan(rc, tot_guarant, q.idealAssigned, curPlusPend)) {
      orderedByNeed.add(q);
    }
  }

  // assign all cluster resources until no more demand, or no resources are left
  while (!orderedByNeed.isEmpty()
      && Resources.greaterThan(rc, tot_guarant, unassigned, Resources.none())) {
    Resource wQassigned = Resource.newInstance(0, 0, 0);
    // we compute normalizedGuarantees capacity based on currently active
    // queues
    resetCapacity(rc, unassigned, orderedByNeed, ignoreGuarantee);

    // For each underserved queue (or set of queues if multiple are equally
    // underserved), offer its share of the unassigned resources based on its
    // normalized guarantee. After the offer, if the queue is not satisfied,
    // place it back in the ordered list of queues, recalculating its place
    // in the order of most under-guaranteed to most over-guaranteed. In this
    // way, the most underserved queue(s) are always given resources first.
    Collection<TempQueue> underserved =
        getMostUnderservedQueues(orderedByNeed, tqComparator);
    for (Iterator<TempQueue> i = underserved.iterator(); i.hasNext();) {
      TempQueue sub = i.next();
      Resource wQavail = Resources.multiplyAndNormalizeUp(rc,
          unassigned, sub.normalizedGuarantee, Resource.newInstance(1, 1, 0));
      Resource wQidle = sub.offer(wQavail, rc, tot_guarant);
      Resource wQdone = Resources.subtract(wQavail, wQidle);

      if (Resources.greaterThan(rc, tot_guarant, wQdone, Resources.none())) {
        // The queue is still asking for more. Put it back in the priority
        // queue, recalculating its order based on need.
        orderedByNeed.add(sub);
      }
      Resources.addTo(wQassigned, wQdone);
    }
    Resources.subtractFrom(unassigned, wQassigned);
  }
}
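In the inner loop (numbers invented for illustration), if 30720 MB and 30 vcores remain unassigned and a queue's normalizedGuarantee is 0.4, the offer wQavail is 12288 MB (and 12 vcores if a DominantResourceCalculator is in use). The step factor Resource.newInstance(1, 1, 0) means the product is only rounded up to whole units rather than to a container size; whatever portion the queue cannot use (wQidle) stays in the pool and is redistributed in later iterations.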
Example 8
Source File: LeafQueue.java From big-c with Apache License 2.0
@Lock(NoLock.class)
private Resource computeUserLimit(FiCaSchedulerApp application,
    Resource clusterResource, Resource required, User user,
    Set<String> requestedLabels) {
  // What is our current capacity?
  // * It is equal to the max(required, queue-capacity) if
  //   we're running below capacity. The 'max' ensures that jobs in queues
  //   with miniscule capacity (< 1 slot) make progress
  // * If we're running over capacity, then its
  //   (usedResources + required) (which extra resources we are allocating)
  Resource queueCapacity = Resource.newInstance(0, 0);
  if (requestedLabels != null && !requestedLabels.isEmpty()) {
    // if we have multiple labels to request, we will choose to use the first
    // label
    String firstLabel = requestedLabels.iterator().next();
    queueCapacity =
        Resources.max(resourceCalculator, clusterResource, queueCapacity,
            Resources.multiplyAndNormalizeUp(resourceCalculator,
                labelManager.getResourceByLabel(firstLabel, clusterResource),
                queueCapacities.getAbsoluteCapacity(firstLabel),
                minimumAllocation));
  } else {
    // else there's no label on request, just to use absolute capacity as
    // capacity for nodes without label
    queueCapacity =
        Resources.multiplyAndNormalizeUp(resourceCalculator, labelManager
            .getResourceByLabel(CommonNodeLabelsManager.NO_LABEL,
                clusterResource),
            queueCapacities.getAbsoluteCapacity(), minimumAllocation);
  }

  // Allow progress for queues with miniscule capacity: the queue should be
  // able to allocate at least one container each time, even if that slightly
  // exceeds the queue's limit.
  queueCapacity =
      Resources.max(resourceCalculator, clusterResource, queueCapacity,
          required);

  Resource currentCapacity =
      Resources.lessThan(resourceCalculator, clusterResource,
          queueUsage.getUsed(), queueCapacity) ? queueCapacity
          : Resources.add(queueUsage.getUsed(), required);

  // Never allow a single user to take more than the
  // queue's configured capacity * user-limit-factor.
  // Also, the queue's configured capacity should be higher than
  // queue-hard-limit * ulMin

  final int activeUsers = activeUsersManager.getNumActiveUsers();

  // UserCapacity_limit = currentCapacity * userLimit
  //   A user's share cannot be less than this when multiple users share the
  //   cluster; if there are so many users that each share would drop below
  //   this value, newly submitted jobs are queued instead of being allocated
  //   resources.
  // UserCapacity_average = currentCapacity / activeUsers
  //   If multiple users submit jobs, the capacity is averaged among them.
  // UserCapacity = max{UserCapacity_limit, UserCapacity_average}
  // FactorCapacity = queueCapacity * userLimitFactor
  //   Lets a single user take more than one queue's worth of capacity
  //   (default 1); when the cluster is idle, more can be allocated.
  // Final_UserCapacity = min{UserCapacity, FactorCapacity}
  Resource limit =
      Resources.roundUp(
          resourceCalculator,
          Resources.min(
              resourceCalculator, clusterResource,
              Resources.max(
                  resourceCalculator, clusterResource,
                  Resources.divideAndCeil(
                      resourceCalculator, currentCapacity, activeUsers),
                  Resources.divideAndCeil(
                      resourceCalculator,
                      Resources.multiplyAndRoundDown(
                          currentCapacity, userLimit),
                      100)
                  ),
              Resources.multiplyAndRoundDown(queueCapacity, userLimitFactor)
              ),
          minimumAllocation);

  String userName = application.getUser();
  /*
  LOG.info("User limit computation for " + userName +
      " in queue " + getQueueName() +
      " userLimit=" + userLimit +
      " userLimitFactor=" + userLimitFactor +
      " required: " + required +
      " consumed: " + user.getUsed() +
      " limit: " + limit +
      " queueCapacity: " + queueCapacity +
      " qconsumed: " + queueUsage.getUsed() +
      " currentCapacity: " + currentCapacity +
      " activeUsers: " + activeUsers +
      " clusterCapacity: " + clusterResource
  );
  */
  user.setUserResourceLimit(limit);
  return limit;
}
Example 9
Source File: LeafQueue.java From big-c with Apache License 2.0
private void updateAbsoluteCapacityResource(Resource clusterResource) {
  absoluteCapacityResource =
      Resources.multiplyAndNormalizeUp(resourceCalculator, clusterResource,
          queueCapacities.getAbsoluteCapacity(), minimumAllocation);
}