Java Code Examples for org.apache.flink.runtime.deployment.TaskDeploymentDescriptor#getProducedPartitions()
The following examples show how to use org.apache.flink.runtime.deployment.TaskDeploymentDescriptor#getProducedPartitions().
The source project and license are noted above each example.
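Before the examples, here is a minimal sketch of how the collection returned by getProducedPartitions() is typically inspected. This is an illustration only, not code from the projects above: the class and method names are placeholders, the tdd argument is assumed to be a TaskDeploymentDescriptor obtained as in the examples, and the accessors used (getNumberOfSubpartitions(), sendScheduleOrUpdateConsumersMessage()) match the Flink versions those examples target.

import java.util.Collection;

import org.apache.flink.runtime.deployment.ResultPartitionDeploymentDescriptor;
import org.apache.flink.runtime.deployment.TaskDeploymentDescriptor;

public class ProducedPartitionsSketch {

    // Logs one line per result partition that the deployed task will produce.
    static void inspect(TaskDeploymentDescriptor tdd) {
        Collection<ResultPartitionDeploymentDescriptor> producedPartitions = tdd.getProducedPartitions();

        for (ResultPartitionDeploymentDescriptor desc : producedPartitions) {
            // Each descriptor describes one produced result partition: how many
            // subpartitions it has and whether consumers should be notified eagerly
            // once data becomes available.
            System.out.println("subpartitions=" + desc.getNumberOfSubpartitions()
                + ", notifyConsumers=" + desc.sendScheduleOrUpdateConsumersMessage());
        }
    }
}

The tests below exercise exactly these descriptors: they build a TaskDeploymentDescriptor for a single execution vertex and assert on the size of the returned collection and on the flags of the individual descriptors.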
Example 1
Source File: ExecutionVertexDeploymentTest.java From Flink-CEPplus with Apache License 2.0
/**
 * Tests that the lazy scheduling flag is correctly forwarded to the produced partition descriptors.
 */
@Test
public void testTddProducedPartitionsLazyScheduling() throws Exception {
    ExecutionJobVertex jobVertex = getExecutionVertex(new JobVertexID(), new DirectScheduledExecutorService());

    IntermediateResult result =
        new IntermediateResult(new IntermediateDataSetID(), jobVertex, 1, ResultPartitionType.PIPELINED);

    ExecutionVertex vertex =
        new ExecutionVertex(jobVertex, 0, new IntermediateResult[]{result}, Time.minutes(1));

    ExecutionEdge mockEdge = createMockExecutionEdge(1);

    result.getPartitions()[0].addConsumerGroup();
    result.getPartitions()[0].addConsumer(mockEdge, 0);

    SlotContext slotContext = mock(SlotContext.class);
    when(slotContext.getAllocationId()).thenReturn(new AllocationID());

    LogicalSlot slot = mock(LogicalSlot.class);
    when(slot.getAllocationId()).thenReturn(new AllocationID());

    for (ScheduleMode mode : ScheduleMode.values()) {
        vertex.getExecutionGraph().setScheduleMode(mode);

        TaskDeploymentDescriptor tdd = vertex.createDeploymentDescriptor(new ExecutionAttemptID(), slot, null, 1);

        Collection<ResultPartitionDeploymentDescriptor> producedPartitions = tdd.getProducedPartitions();

        assertEquals(1, producedPartitions.size());
        ResultPartitionDeploymentDescriptor desc = producedPartitions.iterator().next();
        assertEquals(mode.allowLazyDeployment(), desc.sendScheduleOrUpdateConsumersMessage());
    }
}
Example 2
Source File: ExecutionVertexDeploymentTest.java From flink with Apache License 2.0
/**
 * Tests that the lazy scheduling flag is correctly forwarded to the produced partition descriptors.
 */
@Test
public void testTddProducedPartitionsLazyScheduling() throws Exception {
    for (ScheduleMode scheduleMode : ScheduleMode.values()) {
        ExecutionJobVertex jobVertex = getExecutionVertex(
            new JobVertexID(),
            new DirectScheduledExecutorService(),
            scheduleMode);

        IntermediateResult result =
            new IntermediateResult(new IntermediateDataSetID(), jobVertex, 1, ResultPartitionType.PIPELINED);

        ExecutionAttemptID attemptID = new ExecutionAttemptID();
        ExecutionVertex vertex =
            new ExecutionVertex(jobVertex, 0, new IntermediateResult[]{result}, Time.minutes(1));
        TaskDeploymentDescriptorFactory tddFactory =
            TaskDeploymentDescriptorFactory.fromExecutionVertex(vertex, 1);

        ExecutionEdge mockEdge = createMockExecutionEdge(1);

        result.getPartitions()[0].addConsumerGroup();
        result.getPartitions()[0].addConsumer(mockEdge, 0);

        TaskManagerLocation location =
            new TaskManagerLocation(ResourceID.generate(), InetAddress.getLoopbackAddress(), 1);

        TaskDeploymentDescriptor tdd = tddFactory.createDeploymentDescriptor(
            new AllocationID(),
            0,
            null,
            Execution.registerProducedPartitions(vertex, location, attemptID).get().values());

        Collection<ResultPartitionDeploymentDescriptor> producedPartitions = tdd.getProducedPartitions();

        assertEquals(1, producedPartitions.size());
        ResultPartitionDeploymentDescriptor desc = producedPartitions.iterator().next();
        assertEquals(scheduleMode.allowLazyDeployment(), desc.sendScheduleOrUpdateConsumersMessage());
    }
}
Example 3
Source File: ExecutionVertexDeploymentTest.java From flink with Apache License 2.0
/**
 * Tests that the lazy scheduling flag is correctly forwarded to the produced partition descriptors.
 */
@Test
public void testTddProducedPartitionsLazyScheduling() throws Exception {
    for (ScheduleMode scheduleMode : ScheduleMode.values()) {
        ExecutionJobVertex jobVertex = ExecutionGraphTestUtils.getExecutionJobVertex(
            new JobVertexID(),
            new DirectScheduledExecutorService(),
            scheduleMode);

        IntermediateResult result =
            new IntermediateResult(new IntermediateDataSetID(), jobVertex, 1, ResultPartitionType.PIPELINED);

        ExecutionAttemptID attemptID = new ExecutionAttemptID();
        ExecutionVertex vertex =
            new ExecutionVertex(jobVertex, 0, new IntermediateResult[]{result}, Time.minutes(1));
        TaskDeploymentDescriptorFactory tddFactory =
            TaskDeploymentDescriptorFactory.fromExecutionVertex(vertex, 1);

        ExecutionEdge mockEdge = createMockExecutionEdge(1);

        result.getPartitions()[0].addConsumerGroup();
        result.getPartitions()[0].addConsumer(mockEdge, 0);

        TaskManagerLocation location =
            new TaskManagerLocation(ResourceID.generate(), InetAddress.getLoopbackAddress(), 1);

        TaskDeploymentDescriptor tdd = tddFactory.createDeploymentDescriptor(
            new AllocationID(),
            0,
            null,
            Execution.registerProducedPartitions(vertex, location, attemptID, scheduleMode.allowLazyDeployment()).get().values());

        Collection<ResultPartitionDeploymentDescriptor> producedPartitions = tdd.getProducedPartitions();

        assertEquals(1, producedPartitions.size());
        ResultPartitionDeploymentDescriptor desc = producedPartitions.iterator().next();
        assertEquals(scheduleMode.allowLazyDeployment(), desc.sendScheduleOrUpdateConsumersMessage());
    }
}
Example 4
Source File: ExecutionGraphDeploymentTest.java From flink with Apache License 2.0
@Test
public void testBuildDeploymentDescriptor() {
    try {
        final JobID jobId = new JobID();

        final JobVertexID jid1 = new JobVertexID();
        final JobVertexID jid2 = new JobVertexID();
        final JobVertexID jid3 = new JobVertexID();
        final JobVertexID jid4 = new JobVertexID();

        JobVertex v1 = new JobVertex("v1", jid1);
        JobVertex v2 = new JobVertex("v2", jid2);
        JobVertex v3 = new JobVertex("v3", jid3);
        JobVertex v4 = new JobVertex("v4", jid4);

        v1.setParallelism(10);
        v2.setParallelism(10);
        v3.setParallelism(10);
        v4.setParallelism(10);

        v1.setInvokableClass(BatchTask.class);
        v2.setInvokableClass(BatchTask.class);
        v3.setInvokableClass(BatchTask.class);
        v4.setInvokableClass(BatchTask.class);

        v2.connectNewDataSetAsInput(v1, DistributionPattern.ALL_TO_ALL, ResultPartitionType.PIPELINED);
        v3.connectNewDataSetAsInput(v2, DistributionPattern.ALL_TO_ALL, ResultPartitionType.PIPELINED);
        v4.connectNewDataSetAsInput(v2, DistributionPattern.ALL_TO_ALL, ResultPartitionType.PIPELINED);

        DirectScheduledExecutorService executor = new DirectScheduledExecutorService();
        ExecutionGraph eg = createExecutionGraphWithoutQueuedScheduling(
            jobId,
            new TestingSlotProvider(ignore -> new CompletableFuture<>()),
            executor,
            executor);
        eg.start(ComponentMainThreadExecutorServiceAdapter.forMainThread());
        checkJobOffloaded(eg);

        List<JobVertex> ordered = Arrays.asList(v1, v2, v3, v4);
        eg.attachJobGraph(ordered);

        ExecutionJobVertex ejv = eg.getAllVertices().get(jid2);
        ExecutionVertex vertex = ejv.getTaskVertices()[3];

        final SimpleAckingTaskManagerGateway taskManagerGateway = new SimpleAckingTaskManagerGateway();
        final CompletableFuture<TaskDeploymentDescriptor> tdd = new CompletableFuture<>();

        taskManagerGateway.setSubmitConsumer(FunctionUtils.uncheckedConsumer(taskDeploymentDescriptor -> {
            taskDeploymentDescriptor.loadBigData(blobCache);
            tdd.complete(taskDeploymentDescriptor);
        }));

        final LogicalSlot slot = new TestingLogicalSlotBuilder()
            .setTaskManagerGateway(taskManagerGateway)
            .createTestingLogicalSlot();

        assertEquals(ExecutionState.CREATED, vertex.getExecutionState());

        vertex.getCurrentExecutionAttempt().registerProducedPartitions(slot.getTaskManagerLocation()).get();
        vertex.deployToSlot(slot);

        assertEquals(ExecutionState.DEPLOYING, vertex.getExecutionState());
        checkTaskOffloaded(eg, vertex.getJobvertexId());

        TaskDeploymentDescriptor descr = tdd.get();
        assertNotNull(descr);

        JobInformation jobInformation =
            descr.getSerializedJobInformation().deserializeValue(getClass().getClassLoader());
        TaskInformation taskInformation =
            descr.getSerializedTaskInformation().deserializeValue(getClass().getClassLoader());

        assertEquals(jobId, descr.getJobId());
        assertEquals(jobId, jobInformation.getJobId());
        assertEquals(jid2, taskInformation.getJobVertexId());
        assertEquals(3, descr.getSubtaskIndex());
        assertEquals(10, taskInformation.getNumberOfSubtasks());
        assertEquals(BatchTask.class.getName(), taskInformation.getInvokableClassName());
        assertEquals("v2", taskInformation.getTaskName());

        Collection<ResultPartitionDeploymentDescriptor> producedPartitions = descr.getProducedPartitions();
        Collection<InputGateDeploymentDescriptor> consumedPartitions = descr.getInputGates();

        assertEquals(2, producedPartitions.size());
        assertEquals(1, consumedPartitions.size());

        Iterator<ResultPartitionDeploymentDescriptor> iteratorProducedPartitions = producedPartitions.iterator();
        Iterator<InputGateDeploymentDescriptor> iteratorConsumedPartitions = consumedPartitions.iterator();

        assertEquals(10, iteratorProducedPartitions.next().getNumberOfSubpartitions());
        assertEquals(10, iteratorProducedPartitions.next().getNumberOfSubpartitions());
        assertEquals(10, iteratorConsumedPartitions.next().getShuffleDescriptors().length);
    } catch (Exception e) {
        e.printStackTrace();
        fail(e.getMessage());
    }
}