Java Code Examples for org.apache.flink.runtime.jobmanager.scheduler.CoLocationGroup#getLocationConstraint()
The following examples show how to use
org.apache.flink.runtime.jobmanager.scheduler.CoLocationGroup#getLocationConstraint().
They are taken from open-source projects; the source file, originating project, and license are listed above each example so you can view the code in its original context.
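Before the project examples, the snippet below is a minimal, self-contained sketch of the basic call pattern: a CoLocationGroup hands out one CoLocationConstraint per parallel subtask index, and tasks scheduled under the same constraint are forced into the same shared slot. The class name CoLocationGroupUsageSketch and the vertex names are illustrative placeholders, not code from the examples; the constructors and methods used are only those that also appear in the examples below (the legacy Flink runtime scheduler API).

import org.apache.flink.runtime.jobgraph.JobVertex;
import org.apache.flink.runtime.jobmanager.scheduler.CoLocationConstraint;
import org.apache.flink.runtime.jobmanager.scheduler.CoLocationGroup;

public class CoLocationGroupUsageSketch {

    public static void main(String[] args) {
        // Hypothetical vertices for illustration; any JobVertex instances work here.
        JobVertex head = new JobVertex("head");
        JobVertex tail = new JobVertex("tail");

        // Group the vertices so that subtask i of "head" and subtask i of "tail"
        // must run in the same slot.
        CoLocationGroup group = new CoLocationGroup(head, tail);

        // One constraint per parallel subtask index.
        CoLocationConstraint constraint0 = group.getLocationConstraint(0);
        CoLocationConstraint constraint1 = group.getLocationConstraint(1);

        // A fresh constraint has no slot assigned to it yet.
        System.out.println("constraint 0 assigned: " + constraint0.isAssigned()); // false
        // Different subtask indices get different constraints.
        System.out.println("distinct constraints: " + (constraint0 != constraint1)); // true
    }
}

In the scheduler, these constraints are either passed along when requesting slots (see the ScheduledUnit-based tests below) or stored on an ExecutionVertex (see the ExecutionVertex constructor examples), which is where getLocationConstraint(subTaskIndex) is typically called.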
Example 1
Source File: SharedSlotsTest.java From Flink-CEPplus with Apache License 2.0

@Test
public void testImmediateReleaseTwoLevel() {
    try {
        JobVertexID vid = new JobVertexID();
        JobVertex vertex = new JobVertex("vertex", vid);

        SlotSharingGroup sharingGroup = new SlotSharingGroup(vid);
        SlotSharingGroupAssignment assignment = sharingGroup.getTaskAssignment();

        CoLocationGroup coLocationGroup = new CoLocationGroup(vertex);
        CoLocationConstraint constraint = coLocationGroup.getLocationConstraint(0);

        Instance instance = SchedulerTestUtils.getRandomInstance(1);

        SharedSlot sharedSlot = instance.allocateSharedSlot(assignment);

        SimpleSlot sub = assignment.addSharedSlotAndAllocateSubSlot(sharedSlot, Locality.UNCONSTRAINED, constraint);

        assertNull(sub.getGroupID());
        assertEquals(constraint.getSharedSlot(), sub.getParent());

        sub.releaseSlot();

        assertTrue(sub.isReleased());
        assertTrue(sharedSlot.isReleased());

        assertEquals(1, instance.getNumberOfAvailableSlots());
        assertEquals(0, instance.getNumberOfAllocatedSlots());
    }
    catch (Exception e) {
        e.printStackTrace();
        fail(e.getMessage());
    }
}
Example 2
Source File: ExecutionVertex.java From Flink-CEPplus with Apache License 2.0

/**
 * Creates an ExecutionVertex.
 *
 * @param timeout
 *             The RPC timeout to use for deploy / cancel calls
 * @param initialGlobalModVersion
 *             The global modification version to initialize the first Execution with.
 * @param createTimestamp
 *             The timestamp for the vertex creation, used to initialize the first Execution with.
 * @param maxPriorExecutionHistoryLength
 *             The number of prior Executions (= execution attempts) to keep.
 */
public ExecutionVertex(
        ExecutionJobVertex jobVertex,
        int subTaskIndex,
        IntermediateResult[] producedDataSets,
        Time timeout,
        long initialGlobalModVersion,
        long createTimestamp,
        int maxPriorExecutionHistoryLength) {

    this.jobVertex = jobVertex;
    this.subTaskIndex = subTaskIndex;
    this.taskNameWithSubtask = String.format("%s (%d/%d)",
            jobVertex.getJobVertex().getName(), subTaskIndex + 1, jobVertex.getParallelism());

    this.resultPartitions = new LinkedHashMap<>(producedDataSets.length, 1);

    for (IntermediateResult result : producedDataSets) {
        IntermediateResultPartition irp = new IntermediateResultPartition(result, this, subTaskIndex);
        result.setPartition(subTaskIndex, irp);

        resultPartitions.put(irp.getPartitionId(), irp);
    }

    this.inputEdges = new ExecutionEdge[jobVertex.getJobVertex().getInputs().size()][];

    this.priorExecutions = new EvictingBoundedList<>(maxPriorExecutionHistoryLength);

    this.currentExecution = new Execution(
        getExecutionGraph().getFutureExecutor(),
        this,
        0,
        initialGlobalModVersion,
        createTimestamp,
        timeout);

    // create a co-location scheduling hint, if necessary
    CoLocationGroup clg = jobVertex.getCoLocationGroup();

    if (clg != null) {
        this.locationConstraint = clg.getLocationConstraint(subTaskIndex);
    }
    else {
        this.locationConstraint = null;
    }

    getExecutionGraph().registerExecution(currentExecution);

    this.timeout = timeout;
}
Example 3
Source File: ExecutionVertex.java From Flink-CEPplus with Apache License 2.0

/**
 * Archives the current Execution and creates a new Execution for this vertex.
 *
 * <p>This method atomically checks if the ExecutionGraph is still of an expected
 * global mod. version and replaces the execution if that is the case. If the ExecutionGraph
 * has increased its global mod. version in the meantime, this operation fails.
 *
 * <p>This mechanism can be used to prevent conflicts between various concurrent recovery and
 * reconfiguration actions in a similar way as "optimistic concurrency control".
 *
 * @param timestamp
 *             The creation timestamp for the new Execution
 * @param originatingGlobalModVersion
 *
 * @return Returns the new created Execution.
 *
 * @throws GlobalModVersionMismatch Thrown, if the execution graph has a new global mod
 *                                  version than the one passed to this message.
 */
public Execution resetForNewExecution(final long timestamp, final long originatingGlobalModVersion)
        throws GlobalModVersionMismatch {

    LOG.debug("Resetting execution vertex {} for new execution.", getTaskNameWithSubtaskIndex());

    synchronized (priorExecutions) {
        // check if another global modification has been triggered since the
        // action that originally caused this reset/restart happened
        final long actualModVersion = getExecutionGraph().getGlobalModVersion();
        if (actualModVersion > originatingGlobalModVersion) {
            // global change happened since, reject this action
            throw new GlobalModVersionMismatch(originatingGlobalModVersion, actualModVersion);
        }

        final Execution oldExecution = currentExecution;
        final ExecutionState oldState = oldExecution.getState();

        if (oldState.isTerminal()) {
            priorExecutions.add(oldExecution.archive());

            final Execution newExecution = new Execution(
                getExecutionGraph().getFutureExecutor(),
                this,
                oldExecution.getAttemptNumber() + 1,
                originatingGlobalModVersion,
                timestamp,
                timeout);

            this.currentExecution = newExecution;

            CoLocationGroup grp = jobVertex.getCoLocationGroup();
            if (grp != null) {
                this.locationConstraint = grp.getLocationConstraint(subTaskIndex);
            }

            // register this execution at the execution graph, to receive call backs
            getExecutionGraph().registerExecution(newExecution);

            // if the execution was 'FINISHED' before, tell the ExecutionGraph that
            // we take one step back on the road to reaching global FINISHED
            if (oldState == FINISHED) {
                getExecutionGraph().vertexUnFinished();
            }

            return newExecution;
        }
        else {
            throw new IllegalStateException("Cannot reset a vertex that is in non-terminal state " + oldState);
        }
    }
}
Example 4
Source File: SlotPoolCoLocationTest.java From Flink-CEPplus with Apache License 2.0

/**
 * Tests the scheduling of two tasks with a parallelism of 2 and a co-location constraint.
 */
@Test
public void testSimpleCoLocatedSlotScheduling() throws ExecutionException, InterruptedException {
    final BlockingQueue<AllocationID> allocationIds = new ArrayBlockingQueue<>(2);

    final TestingResourceManagerGateway testingResourceManagerGateway = slotPoolResource.getTestingResourceManagerGateway();
    testingResourceManagerGateway.setRequestSlotConsumer(
        (SlotRequest slotRequest) -> allocationIds.offer(slotRequest.getAllocationId()));

    final TaskManagerLocation taskManagerLocation = new LocalTaskManagerLocation();

    final SlotPool slotPoolGateway = slotPoolResource.getSlotPool();
    slotPoolGateway.registerTaskManager(taskManagerLocation.getResourceID());

    CoLocationGroup group = new CoLocationGroup();
    CoLocationConstraint coLocationConstraint1 = group.getLocationConstraint(0);
    CoLocationConstraint coLocationConstraint2 = group.getLocationConstraint(1);
    final SlotSharingGroupId slotSharingGroupId = new SlotSharingGroupId();

    JobVertexID jobVertexId1 = new JobVertexID();
    JobVertexID jobVertexId2 = new JobVertexID();

    final SlotProvider slotProvider = slotPoolResource.getSlotProvider();
    CompletableFuture<LogicalSlot> logicalSlotFuture11 = slotProvider.allocateSlot(
        new ScheduledUnit(
            jobVertexId1,
            slotSharingGroupId,
            coLocationConstraint1),
        true,
        SlotProfile.noRequirements(),
        TestingUtils.infiniteTime());

    CompletableFuture<LogicalSlot> logicalSlotFuture22 = slotProvider.allocateSlot(
        new ScheduledUnit(
            jobVertexId2,
            slotSharingGroupId,
            coLocationConstraint2),
        true,
        SlotProfile.noRequirements(),
        TestingUtils.infiniteTime());

    CompletableFuture<LogicalSlot> logicalSlotFuture12 = slotProvider.allocateSlot(
        new ScheduledUnit(
            jobVertexId2,
            slotSharingGroupId,
            coLocationConstraint1),
        true,
        SlotProfile.noRequirements(),
        TestingUtils.infiniteTime());

    CompletableFuture<LogicalSlot> logicalSlotFuture21 = slotProvider.allocateSlot(
        new ScheduledUnit(
            jobVertexId1,
            slotSharingGroupId,
            coLocationConstraint2),
        true,
        SlotProfile.noRequirements(),
        TestingUtils.infiniteTime());

    final AllocationID allocationId1 = allocationIds.take();
    final AllocationID allocationId2 = allocationIds.take();

    Collection<SlotOffer> slotOfferFuture1 = slotPoolGateway.offerSlots(
        taskManagerLocation,
        new SimpleAckingTaskManagerGateway(),
        Collections.singletonList(new SlotOffer(
            allocationId1,
            0,
            ResourceProfile.UNKNOWN)));

    Collection<SlotOffer> slotOfferFuture2 = slotPoolGateway.offerSlots(
        taskManagerLocation,
        new SimpleAckingTaskManagerGateway(),
        Collections.singletonList(new SlotOffer(
            allocationId2,
            0,
            ResourceProfile.UNKNOWN)));

    assertFalse(slotOfferFuture1.isEmpty());
    assertFalse(slotOfferFuture2.isEmpty());

    LogicalSlot logicalSlot11 = logicalSlotFuture11.get();
    LogicalSlot logicalSlot12 = logicalSlotFuture12.get();
    LogicalSlot logicalSlot21 = logicalSlotFuture21.get();
    LogicalSlot logicalSlot22 = logicalSlotFuture22.get();

    assertEquals(logicalSlot11.getAllocationId(), logicalSlot12.getAllocationId());
    assertEquals(logicalSlot21.getAllocationId(), logicalSlot22.getAllocationId());
    assertNotEquals(logicalSlot11.getAllocationId(), logicalSlot21.getAllocationId());
}
Example 5
Source File: SharedSlotsTest.java From Flink-CEPplus with Apache License 2.0

/**
 * We allocate the structure below and release it from the root.
 *
 * <pre>
 *     Shared(0)(root)
 *        |
 *        +-- Simple(2)(sink)
 *        |
 *        +-- Shared(1)(co-location-group)
 *        |      |
 *        |      +-- Simple(0)(tail)
 *        |      +-- Simple(1)(head)
 *        |
 *        +-- Simple(0)(source)
 * </pre>
 */
@Test
public void testReleaseTwoLevelsFromRoot() {
    try {
        JobVertexID sourceId = new JobVertexID();
        JobVertexID headId = new JobVertexID();
        JobVertexID tailId = new JobVertexID();
        JobVertexID sinkId = new JobVertexID();

        JobVertex headVertex = new JobVertex("head", headId);
        JobVertex tailVertex = new JobVertex("tail", tailId);

        SlotSharingGroup sharingGroup = new SlotSharingGroup(sourceId, headId, tailId, sinkId);
        SlotSharingGroupAssignment assignment = sharingGroup.getTaskAssignment();
        assertEquals(0, assignment.getNumberOfSlots());

        CoLocationGroup coLocationGroup = new CoLocationGroup(headVertex, tailVertex);
        CoLocationConstraint constraint = coLocationGroup.getLocationConstraint(0);
        assertFalse(constraint.isAssigned());

        Instance instance = SchedulerTestUtils.getRandomInstance(1);

        // allocate a shared slot
        SharedSlot sharedSlot = instance.allocateSharedSlot(assignment);

        // get the first simple slot
        SimpleSlot sourceSlot = assignment.addSharedSlotAndAllocateSubSlot(sharedSlot, Locality.LOCAL, sourceId);

        SimpleSlot headSlot = assignment.getSlotForTask(constraint, NO_LOCATION);
        constraint.lockLocation();
        SimpleSlot tailSlot = assignment.getSlotForTask(constraint, NO_LOCATION);

        SimpleSlot sinkSlot = assignment.getSlotForTask(sinkId, NO_LOCATION);

        assertEquals(4, sharedSlot.getNumberLeaves());

        // release all
        sourceSlot.releaseSlot();
        headSlot.releaseSlot();
        tailSlot.releaseSlot();
        sinkSlot.releaseSlot();

        assertTrue(sharedSlot.isReleased());
        assertTrue(sourceSlot.isReleased());
        assertTrue(headSlot.isReleased());
        assertTrue(tailSlot.isReleased());
        assertTrue(sinkSlot.isReleased());
        assertTrue(constraint.getSharedSlot().isReleased());

        assertTrue(constraint.isAssigned());
        assertFalse(constraint.isAssignedAndAlive());

        assertEquals(1, instance.getNumberOfAvailableSlots());
        assertEquals(0, instance.getNumberOfAllocatedSlots());

        assertEquals(0, assignment.getNumberOfSlots());
    }
    catch (Exception e) {
        e.printStackTrace();
        fail(e.getMessage());
    }
}
Example 6
Source File: ExecutionVertex.java From flink with Apache License 2.0

/**
 * Creates an ExecutionVertex.
 *
 * @param timeout
 *             The RPC timeout to use for deploy / cancel calls
 * @param initialGlobalModVersion
 *             The global modification version to initialize the first Execution with.
 * @param createTimestamp
 *             The timestamp for the vertex creation, used to initialize the first Execution with.
 * @param maxPriorExecutionHistoryLength
 *             The number of prior Executions (= execution attempts) to keep.
 */
public ExecutionVertex(
        ExecutionJobVertex jobVertex,
        int subTaskIndex,
        IntermediateResult[] producedDataSets,
        Time timeout,
        long initialGlobalModVersion,
        long createTimestamp,
        int maxPriorExecutionHistoryLength) {

    this.jobVertex = jobVertex;
    this.subTaskIndex = subTaskIndex;
    this.executionVertexId = new ExecutionVertexID(jobVertex.getJobVertexId(), subTaskIndex);
    this.taskNameWithSubtask = String.format("%s (%d/%d)",
            jobVertex.getJobVertex().getName(), subTaskIndex + 1, jobVertex.getParallelism());

    this.resultPartitions = new LinkedHashMap<>(producedDataSets.length, 1);

    for (IntermediateResult result : producedDataSets) {
        IntermediateResultPartition irp = new IntermediateResultPartition(result, this, subTaskIndex);
        result.setPartition(subTaskIndex, irp);

        resultPartitions.put(irp.getPartitionId(), irp);
    }

    this.inputEdges = new ExecutionEdge[jobVertex.getJobVertex().getInputs().size()][];

    this.priorExecutions = new EvictingBoundedList<>(maxPriorExecutionHistoryLength);

    this.currentExecution = new Execution(
        getExecutionGraph().getFutureExecutor(),
        this,
        0,
        initialGlobalModVersion,
        createTimestamp,
        timeout);

    // create a co-location scheduling hint, if necessary
    CoLocationGroup clg = jobVertex.getCoLocationGroup();

    if (clg != null) {
        this.locationConstraint = clg.getLocationConstraint(subTaskIndex);
    }
    else {
        this.locationConstraint = null;
    }

    getExecutionGraph().registerExecution(currentExecution);

    this.timeout = timeout;

    this.inputSplits = new ArrayList<>();
}
Example 7
Source File: ExecutionVertex.java From flink with Apache License 2.0

/**
 * Archives the current Execution and creates a new Execution for this vertex.
 *
 * <p>This method atomically checks if the ExecutionGraph is still of an expected
 * global mod. version and replaces the execution if that is the case. If the ExecutionGraph
 * has increased its global mod. version in the meantime, this operation fails.
 *
 * <p>This mechanism can be used to prevent conflicts between various concurrent recovery and
 * reconfiguration actions in a similar way as "optimistic concurrency control".
 *
 * @param timestamp
 *             The creation timestamp for the new Execution
 * @param originatingGlobalModVersion
 *
 * @return Returns the new created Execution.
 *
 * @throws GlobalModVersionMismatch Thrown, if the execution graph has a new global mod
 *                                  version than the one passed to this message.
 */
public Execution resetForNewExecution(final long timestamp, final long originatingGlobalModVersion)
        throws GlobalModVersionMismatch {

    LOG.debug("Resetting execution vertex {} for new execution.", getTaskNameWithSubtaskIndex());

    synchronized (priorExecutions) {
        // check if another global modification has been triggered since the
        // action that originally caused this reset/restart happened
        final long actualModVersion = getExecutionGraph().getGlobalModVersion();
        if (actualModVersion > originatingGlobalModVersion) {
            // global change happened since, reject this action
            throw new GlobalModVersionMismatch(originatingGlobalModVersion, actualModVersion);
        }

        final Execution oldExecution = currentExecution;
        final ExecutionState oldState = oldExecution.getState();

        if (oldState.isTerminal()) {
            if (oldState == FINISHED) {
                // pipelined partitions are released in Execution#cancel(), covering both job failures and vertex resets
                // do not release pipelined partitions here to save RPC calls
                oldExecution.handlePartitionCleanup(false, true);
                getExecutionGraph().getPartitionReleaseStrategy().vertexUnfinished(executionVertexId);
            }

            priorExecutions.add(oldExecution.archive());

            final Execution newExecution = new Execution(
                getExecutionGraph().getFutureExecutor(),
                this,
                oldExecution.getAttemptNumber() + 1,
                originatingGlobalModVersion,
                timestamp,
                timeout);

            currentExecution = newExecution;

            synchronized (inputSplits) {
                InputSplitAssigner assigner = jobVertex.getSplitAssigner();
                if (assigner != null) {
                    assigner.returnInputSplit(inputSplits, getParallelSubtaskIndex());
                    inputSplits.clear();
                }
            }

            CoLocationGroup grp = jobVertex.getCoLocationGroup();
            if (grp != null) {
                locationConstraint = grp.getLocationConstraint(subTaskIndex);
            }

            // register this execution at the execution graph, to receive call backs
            getExecutionGraph().registerExecution(newExecution);

            // if the execution was 'FINISHED' before, tell the ExecutionGraph that
            // we take one step back on the road to reaching global FINISHED
            if (oldState == FINISHED) {
                getExecutionGraph().vertexUnFinished();
            }

            // reset the intermediate results
            for (IntermediateResultPartition resultPartition : resultPartitions.values()) {
                resultPartition.resetForNewExecution();
            }

            return newExecution;
        }
        else {
            throw new IllegalStateException("Cannot reset a vertex that is in non-terminal state " + oldState);
        }
    }
}
Example 8
Source File: SlotPoolCoLocationTest.java From flink with Apache License 2.0

/**
 * Tests the scheduling of two tasks with a parallelism of 2 and a co-location constraint.
 */
@Test
public void testSimpleCoLocatedSlotScheduling() throws ExecutionException, InterruptedException {
    final BlockingQueue<AllocationID> allocationIds = new ArrayBlockingQueue<>(2);

    final TestingResourceManagerGateway testingResourceManagerGateway = slotPoolResource.getTestingResourceManagerGateway();
    testingResourceManagerGateway.setRequestSlotConsumer(
        (SlotRequest slotRequest) -> allocationIds.offer(slotRequest.getAllocationId()));

    final TaskManagerLocation taskManagerLocation = new LocalTaskManagerLocation();

    final SlotPool slotPoolGateway = slotPoolResource.getSlotPool();
    slotPoolGateway.registerTaskManager(taskManagerLocation.getResourceID());

    CoLocationGroup group = new CoLocationGroup();
    CoLocationConstraint coLocationConstraint1 = group.getLocationConstraint(0);
    CoLocationConstraint coLocationConstraint2 = group.getLocationConstraint(1);
    final SlotSharingGroupId slotSharingGroupId = new SlotSharingGroupId();

    JobVertexID jobVertexId1 = new JobVertexID();
    JobVertexID jobVertexId2 = new JobVertexID();

    final SlotProvider slotProvider = slotPoolResource.getSlotProvider();
    CompletableFuture<LogicalSlot> logicalSlotFuture11 = slotProvider.allocateSlot(
        new ScheduledUnit(
            jobVertexId1,
            slotSharingGroupId,
            coLocationConstraint1),
        true,
        SlotProfile.noRequirements(),
        TestingUtils.infiniteTime());

    CompletableFuture<LogicalSlot> logicalSlotFuture22 = slotProvider.allocateSlot(
        new ScheduledUnit(
            jobVertexId2,
            slotSharingGroupId,
            coLocationConstraint2),
        true,
        SlotProfile.noRequirements(),
        TestingUtils.infiniteTime());

    CompletableFuture<LogicalSlot> logicalSlotFuture12 = slotProvider.allocateSlot(
        new ScheduledUnit(
            jobVertexId2,
            slotSharingGroupId,
            coLocationConstraint1),
        true,
        SlotProfile.noRequirements(),
        TestingUtils.infiniteTime());

    CompletableFuture<LogicalSlot> logicalSlotFuture21 = slotProvider.allocateSlot(
        new ScheduledUnit(
            jobVertexId1,
            slotSharingGroupId,
            coLocationConstraint2),
        true,
        SlotProfile.noRequirements(),
        TestingUtils.infiniteTime());

    final AllocationID allocationId1 = allocationIds.take();
    final AllocationID allocationId2 = allocationIds.take();

    Collection<SlotOffer> slotOfferFuture1 = slotPoolGateway.offerSlots(
        taskManagerLocation,
        new SimpleAckingTaskManagerGateway(),
        Collections.singletonList(new SlotOffer(
            allocationId1,
            0,
            ResourceProfile.UNKNOWN)));

    Collection<SlotOffer> slotOfferFuture2 = slotPoolGateway.offerSlots(
        taskManagerLocation,
        new SimpleAckingTaskManagerGateway(),
        Collections.singletonList(new SlotOffer(
            allocationId2,
            0,
            ResourceProfile.UNKNOWN)));

    assertFalse(slotOfferFuture1.isEmpty());
    assertFalse(slotOfferFuture2.isEmpty());

    LogicalSlot logicalSlot11 = logicalSlotFuture11.get();
    LogicalSlot logicalSlot12 = logicalSlotFuture12.get();
    LogicalSlot logicalSlot21 = logicalSlotFuture21.get();
    LogicalSlot logicalSlot22 = logicalSlotFuture22.get();

    assertEquals(logicalSlot11.getAllocationId(), logicalSlot12.getAllocationId());
    assertEquals(logicalSlot21.getAllocationId(), logicalSlot22.getAllocationId());
    assertNotEquals(logicalSlot11.getAllocationId(), logicalSlot21.getAllocationId());
}
Example 9
Source File: SlotPoolCoLocationTest.java From flink with Apache License 2.0

@Test
public void testCoLocatedSlotRequestsFailBeforeResolved() throws ExecutionException, InterruptedException {
    final ResourceProfile rp1 = new ResourceProfile(1.0, 100);
    final ResourceProfile rp2 = new ResourceProfile(2.0, 200);
    final ResourceProfile rp3 = new ResourceProfile(5.0, 500);
    final ResourceProfile allocatedSlotRp = new ResourceProfile(3.0, 300);

    final BlockingQueue<AllocationID> allocationIds = new ArrayBlockingQueue<>(1);

    final TestingResourceManagerGateway testingResourceManagerGateway = slotPoolResource.getTestingResourceManagerGateway();
    testingResourceManagerGateway.setRequestSlotConsumer(
        (SlotRequest slotRequest) -> allocationIds.offer(slotRequest.getAllocationId()));

    final TaskManagerLocation taskManagerLocation = new LocalTaskManagerLocation();

    final SlotPool slotPoolGateway = slotPoolResource.getSlotPool();
    slotPoolGateway.registerTaskManager(taskManagerLocation.getResourceID());

    CoLocationGroup group = new CoLocationGroup();
    CoLocationConstraint coLocationConstraint1 = group.getLocationConstraint(0);

    final SlotSharingGroupId slotSharingGroupId = new SlotSharingGroupId();

    JobVertexID jobVertexId1 = new JobVertexID();
    JobVertexID jobVertexId2 = new JobVertexID();
    JobVertexID jobVertexId3 = new JobVertexID();

    final SlotProvider slotProvider = slotPoolResource.getSlotProvider();
    CompletableFuture<LogicalSlot> logicalSlotFuture1 = slotProvider.allocateSlot(
        new ScheduledUnit(
            jobVertexId1,
            slotSharingGroupId,
            coLocationConstraint1),
        true,
        SlotProfile.noLocality(rp1),
        TestingUtils.infiniteTime());

    CompletableFuture<LogicalSlot> logicalSlotFuture2 = slotProvider.allocateSlot(
        new ScheduledUnit(
            jobVertexId2,
            slotSharingGroupId,
            coLocationConstraint1),
        true,
        SlotProfile.noLocality(rp2),
        TestingUtils.infiniteTime());

    CompletableFuture<LogicalSlot> logicalSlotFuture3 = slotProvider.allocateSlot(
        new ScheduledUnit(
            jobVertexId3,
            slotSharingGroupId,
            coLocationConstraint1),
        true,
        SlotProfile.noLocality(rp3),
        TestingUtils.infiniteTime());

    final AllocationID allocationId1 = allocationIds.take();

    Collection<SlotOffer> slotOfferFuture1 = slotPoolGateway.offerSlots(
        taskManagerLocation,
        new SimpleAckingTaskManagerGateway(),
        Collections.singletonList(new SlotOffer(
            allocationId1,
            0,
            allocatedSlotRp)));

    assertFalse(slotOfferFuture1.isEmpty());

    for (CompletableFuture<LogicalSlot> logicalSlotFuture : Arrays.asList(logicalSlotFuture1, logicalSlotFuture2, logicalSlotFuture3)) {
        assertTrue(logicalSlotFuture.isDone() && logicalSlotFuture.isCompletedExceptionally());
        logicalSlotFuture.whenComplete((LogicalSlot ignored, Throwable throwable) -> {
            assertTrue(throwable instanceof SharedSlotOversubscribedException);
            assertTrue(((SharedSlotOversubscribedException) throwable).canRetry());
        });
    }
}
Example 10
Source File: SlotPoolCoLocationTest.java From flink with Apache License 2.0

@Test
public void testCoLocatedSlotRequestsFailAfterResolved() throws ExecutionException, InterruptedException {
    final ResourceProfile rp1 = new ResourceProfile(1.0, 100);
    final ResourceProfile rp2 = new ResourceProfile(2.0, 200);
    final ResourceProfile rp3 = new ResourceProfile(5.0, 500);
    final ResourceProfile allocatedSlotRp = new ResourceProfile(3.0, 300);

    final BlockingQueue<AllocationID> allocationIds = new ArrayBlockingQueue<>(1);

    final TestingResourceManagerGateway testingResourceManagerGateway = slotPoolResource.getTestingResourceManagerGateway();
    testingResourceManagerGateway.setRequestSlotConsumer(
        (SlotRequest slotRequest) -> allocationIds.offer(slotRequest.getAllocationId()));

    final TaskManagerLocation taskManagerLocation = new LocalTaskManagerLocation();

    final SlotPool slotPoolGateway = slotPoolResource.getSlotPool();
    slotPoolGateway.registerTaskManager(taskManagerLocation.getResourceID());

    CoLocationGroup group = new CoLocationGroup();
    CoLocationConstraint coLocationConstraint1 = group.getLocationConstraint(0);

    final SlotSharingGroupId slotSharingGroupId = new SlotSharingGroupId();

    JobVertexID jobVertexId1 = new JobVertexID();
    JobVertexID jobVertexId2 = new JobVertexID();
    JobVertexID jobVertexId3 = new JobVertexID();

    final SlotProvider slotProvider = slotPoolResource.getSlotProvider();
    CompletableFuture<LogicalSlot> logicalSlotFuture1 = slotProvider.allocateSlot(
        new ScheduledUnit(
            jobVertexId1,
            slotSharingGroupId,
            coLocationConstraint1),
        true,
        SlotProfile.noLocality(rp1),
        TestingUtils.infiniteTime());

    CompletableFuture<LogicalSlot> logicalSlotFuture2 = slotProvider.allocateSlot(
        new ScheduledUnit(
            jobVertexId2,
            slotSharingGroupId,
            coLocationConstraint1),
        true,
        SlotProfile.noLocality(rp2),
        TestingUtils.infiniteTime());

    final AllocationID allocationId1 = allocationIds.take();

    Collection<SlotOffer> slotOfferFuture1 = slotPoolGateway.offerSlots(
        taskManagerLocation,
        new SimpleAckingTaskManagerGateway(),
        Collections.singletonList(new SlotOffer(
            allocationId1,
            0,
            allocatedSlotRp)));

    assertFalse(slotOfferFuture1.isEmpty());

    CompletableFuture<LogicalSlot> logicalSlotFuture3 = slotProvider.allocateSlot(
        new ScheduledUnit(
            jobVertexId3,
            slotSharingGroupId,
            coLocationConstraint1),
        true,
        SlotProfile.noLocality(rp3),
        TestingUtils.infiniteTime());

    LogicalSlot logicalSlot1 = logicalSlotFuture1.get();
    LogicalSlot logicalSlot2 = logicalSlotFuture2.get();

    assertEquals(allocationId1, logicalSlot1.getAllocationId());
    assertEquals(allocationId1, logicalSlot2.getAllocationId());

    assertTrue(logicalSlotFuture3.isDone() && logicalSlotFuture3.isCompletedExceptionally());

    logicalSlotFuture3.whenComplete((LogicalSlot ignored, Throwable throwable) -> {
        assertTrue(throwable instanceof SharedSlotOversubscribedException);
        assertTrue(((SharedSlotOversubscribedException) throwable).canRetry());
    });
}
Example 11
Source File: ExecutionVertex.java From flink with Apache License 2.0

/**
 * Creates an ExecutionVertex.
 *
 * @param timeout
 *             The RPC timeout to use for deploy / cancel calls
 * @param initialGlobalModVersion
 *             The global modification version to initialize the first Execution with.
 * @param createTimestamp
 *             The timestamp for the vertex creation, used to initialize the first Execution with.
 * @param maxPriorExecutionHistoryLength
 *             The number of prior Executions (= execution attempts) to keep.
 */
public ExecutionVertex(
        ExecutionJobVertex jobVertex,
        int subTaskIndex,
        IntermediateResult[] producedDataSets,
        Time timeout,
        long initialGlobalModVersion,
        long createTimestamp,
        int maxPriorExecutionHistoryLength) {

    this.jobVertex = jobVertex;
    this.subTaskIndex = subTaskIndex;
    this.executionVertexId = new ExecutionVertexID(jobVertex.getJobVertexId(), subTaskIndex);
    this.taskNameWithSubtask = String.format("%s (%d/%d)",
            jobVertex.getJobVertex().getName(), subTaskIndex + 1, jobVertex.getParallelism());

    this.resultPartitions = new LinkedHashMap<>(producedDataSets.length, 1);

    for (IntermediateResult result : producedDataSets) {
        IntermediateResultPartition irp = new IntermediateResultPartition(result, this, subTaskIndex);
        result.setPartition(subTaskIndex, irp);

        resultPartitions.put(irp.getPartitionId(), irp);
    }

    this.inputEdges = new ExecutionEdge[jobVertex.getJobVertex().getInputs().size()][];

    this.priorExecutions = new EvictingBoundedList<>(maxPriorExecutionHistoryLength);

    this.currentExecution = new Execution(
        getExecutionGraph().getFutureExecutor(),
        this,
        0,
        initialGlobalModVersion,
        createTimestamp,
        timeout);

    // create a co-location scheduling hint, if necessary
    CoLocationGroup clg = jobVertex.getCoLocationGroup();

    if (clg != null) {
        this.locationConstraint = clg.getLocationConstraint(subTaskIndex);
    }
    else {
        this.locationConstraint = null;
    }

    getExecutionGraph().registerExecution(currentExecution);

    this.timeout = timeout;

    this.inputSplits = new ArrayList<>();
}
Example 12
Source File: ExecutionVertex.java From flink with Apache License 2.0

private Execution resetForNewExecutionInternal(final long timestamp, final long originatingGlobalModVersion) {
    final Execution oldExecution = currentExecution;
    final ExecutionState oldState = oldExecution.getState();

    if (oldState.isTerminal()) {
        if (oldState == FINISHED) {
            // pipelined partitions are released in Execution#cancel(), covering both job failures and vertex resets
            // do not release pipelined partitions here to save RPC calls
            oldExecution.handlePartitionCleanup(false, true);
            getExecutionGraph().getPartitionReleaseStrategy().vertexUnfinished(executionVertexId);
        }

        priorExecutions.add(oldExecution.archive());

        final Execution newExecution = new Execution(
            getExecutionGraph().getFutureExecutor(),
            this,
            oldExecution.getAttemptNumber() + 1,
            originatingGlobalModVersion,
            timestamp,
            timeout);

        currentExecution = newExecution;

        synchronized (inputSplits) {
            InputSplitAssigner assigner = jobVertex.getSplitAssigner();
            if (assigner != null) {
                assigner.returnInputSplit(inputSplits, getParallelSubtaskIndex());
                inputSplits.clear();
            }
        }

        CoLocationGroup grp = jobVertex.getCoLocationGroup();
        if (grp != null) {
            locationConstraint = grp.getLocationConstraint(subTaskIndex);
        }

        // register this execution at the execution graph, to receive call backs
        getExecutionGraph().registerExecution(newExecution);

        // if the execution was 'FINISHED' before, tell the ExecutionGraph that
        // we take one step back on the road to reaching global FINISHED
        if (oldState == FINISHED) {
            getExecutionGraph().vertexUnFinished();
        }

        // reset the intermediate results
        for (IntermediateResultPartition resultPartition : resultPartitions.values()) {
            resultPartition.resetForNewExecution();
        }

        return newExecution;
    } else {
        throw new IllegalStateException("Cannot reset a vertex that is in non-terminal state " + oldState);
    }
}
Example 13
Source File: SlotPoolCoLocationTest.java From flink with Apache License 2.0

/**
 * Tests the scheduling of two tasks with a parallelism of 2 and a co-location constraint.
 */
@Test
public void testSimpleCoLocatedSlotScheduling() throws ExecutionException, InterruptedException {
    final BlockingQueue<AllocationID> allocationIds = new ArrayBlockingQueue<>(2);

    final TestingResourceManagerGateway testingResourceManagerGateway = slotPoolResource.getTestingResourceManagerGateway();
    testingResourceManagerGateway.setRequestSlotConsumer(
        (SlotRequest slotRequest) -> allocationIds.offer(slotRequest.getAllocationId()));

    final TaskManagerLocation taskManagerLocation = new LocalTaskManagerLocation();

    final SlotPool slotPoolGateway = slotPoolResource.getSlotPool();
    slotPoolGateway.registerTaskManager(taskManagerLocation.getResourceID());

    CoLocationGroup group = new CoLocationGroup();
    CoLocationConstraint coLocationConstraint1 = group.getLocationConstraint(0);
    CoLocationConstraint coLocationConstraint2 = group.getLocationConstraint(1);
    final SlotSharingGroupId slotSharingGroupId = new SlotSharingGroupId();

    JobVertexID jobVertexId1 = new JobVertexID();
    JobVertexID jobVertexId2 = new JobVertexID();

    final SlotProvider slotProvider = slotPoolResource.getSlotProvider();
    CompletableFuture<LogicalSlot> logicalSlotFuture11 = slotProvider.allocateSlot(
        new ScheduledUnit(
            jobVertexId1,
            slotSharingGroupId,
            coLocationConstraint1),
        SlotProfile.noRequirements(),
        TestingUtils.infiniteTime());

    CompletableFuture<LogicalSlot> logicalSlotFuture22 = slotProvider.allocateSlot(
        new ScheduledUnit(
            jobVertexId2,
            slotSharingGroupId,
            coLocationConstraint2),
        SlotProfile.noRequirements(),
        TestingUtils.infiniteTime());

    CompletableFuture<LogicalSlot> logicalSlotFuture12 = slotProvider.allocateSlot(
        new ScheduledUnit(
            jobVertexId2,
            slotSharingGroupId,
            coLocationConstraint1),
        SlotProfile.noRequirements(),
        TestingUtils.infiniteTime());

    CompletableFuture<LogicalSlot> logicalSlotFuture21 = slotProvider.allocateSlot(
        new ScheduledUnit(
            jobVertexId1,
            slotSharingGroupId,
            coLocationConstraint2),
        SlotProfile.noRequirements(),
        TestingUtils.infiniteTime());

    final AllocationID allocationId1 = allocationIds.take();
    final AllocationID allocationId2 = allocationIds.take();

    Collection<SlotOffer> slotOfferFuture1 = slotPoolGateway.offerSlots(
        taskManagerLocation,
        new SimpleAckingTaskManagerGateway(),
        Collections.singletonList(new SlotOffer(
            allocationId1,
            0,
            ResourceProfile.ANY)));

    Collection<SlotOffer> slotOfferFuture2 = slotPoolGateway.offerSlots(
        taskManagerLocation,
        new SimpleAckingTaskManagerGateway(),
        Collections.singletonList(new SlotOffer(
            allocationId2,
            0,
            ResourceProfile.ANY)));

    assertFalse(slotOfferFuture1.isEmpty());
    assertFalse(slotOfferFuture2.isEmpty());

    LogicalSlot logicalSlot11 = logicalSlotFuture11.get();
    LogicalSlot logicalSlot12 = logicalSlotFuture12.get();
    LogicalSlot logicalSlot21 = logicalSlotFuture21.get();
    LogicalSlot logicalSlot22 = logicalSlotFuture22.get();

    assertEquals(logicalSlot11.getAllocationId(), logicalSlot12.getAllocationId());
    assertEquals(logicalSlot21.getAllocationId(), logicalSlot22.getAllocationId());
    assertNotEquals(logicalSlot11.getAllocationId(), logicalSlot21.getAllocationId());
}