org.apache.flink.runtime.deployment.ResultPartitionDeploymentDescriptor Java Examples

The following examples show how to use org.apache.flink.runtime.deployment.ResultPartitionDeploymentDescriptor. You can vote up the ones you like or vote down the ones you don't like, and go to the original project or source file by following the links above each example. You may check out the related API usage on the sidebar.
Example #1
Source File: TaskTest.java    From flink with Apache License 2.0 6 votes vote down vote up
@Test
public void testExecutionFailsInNetworkRegistrationForPartitions() throws Exception {
	// One dummy pipelined partition is enough to drive the task into the
	// produced-partition network registration path.
	final ShuffleDescriptor localShuffleDescriptor = NettyShuffleDescriptorBuilder.newBuilder().buildLocal();
	final PartitionDescriptor pipelinedPartition = new PartitionDescriptor(
		new IntermediateDataSetID(),
		new IntermediateResultPartitionID(),
		ResultPartitionType.PIPELINED,
		1,
		1);
	final ResultPartitionDeploymentDescriptor deploymentDescriptor = new ResultPartitionDeploymentDescriptor(
		pipelinedPartition,
		localShuffleDescriptor,
		1,
		false);
	// No input gates: any registration failure must come from the partition side.
	testExecutionFailsInNetworkRegistration(Collections.singleton(deploymentDescriptor), Collections.emptyList());
}
 
Example #2
Source File: TaskExecutorSubmissionTest.java    From flink with Apache License 2.0 6 votes vote down vote up
private TaskDeploymentDescriptor createSender(
		NettyShuffleDescriptor shuffleDescriptor,
		Class<? extends AbstractInvokable> abstractInvokable) throws IOException {
	// Derive the partition descriptor from the partition id carried by the
	// given shuffle descriptor.
	final PartitionDescriptor descriptor = PartitionDescriptorBuilder
		.newBuilder()
		.setPartitionId(shuffleDescriptor.getResultPartitionID().getPartitionId())
		.build();
	final ResultPartitionDeploymentDescriptor producedPartition =
		new ResultPartitionDeploymentDescriptor(descriptor, shuffleDescriptor, 1, true);
	// The sender task produces exactly one partition and consumes nothing.
	return createTestTaskDeploymentDescriptor(
		"Sender",
		shuffleDescriptor.getResultPartitionID().getProducerId(),
		abstractInvokable,
		1,
		Collections.singletonList(producedPartition),
		Collections.emptyList());
}
 
Example #3
Source File: JobMasterPartitionTrackerImpl.java    From flink with Apache License 2.0 6 votes vote down vote up
private void internalReleaseOrPromotePartitionsOnTaskExecutor(
	ResourceID potentialPartitionLocation,
	Collection<ResultPartitionDeploymentDescriptor> partitionDeploymentDescriptors) {

	// Split the locally-hosted partitions into persistent (key true) and
	// non-persistent (key false) groups of partition ids; partitioningBy
	// guarantees both keys are present even for empty input.
	final Map<Boolean, Set<ResultPartitionID>> partitionIdsByPersistence =
		partitionDeploymentDescriptors.stream()
			.filter(descriptor -> isPartitionWithLocalResources(descriptor))
			.collect(Collectors.partitioningBy(
				descriptor -> descriptor.getPartitionType().isPersistent(),
				Collectors.mapping(descriptor -> getResultPartitionId(descriptor), Collectors.toSet())));

	// Non-persistent partitions are released, persistent ones promoted.
	internalReleaseOrPromotePartitionsOnTaskExecutor(
		potentialPartitionLocation,
		partitionIdsByPersistence.get(false),
		partitionIdsByPersistence.get(true)
	);
}
 
Example #4
Source File: TaskExecutorSubmissionTest.java    From flink with Apache License 2.0 6 votes vote down vote up
private TaskDeploymentDescriptor createTestTaskDeploymentDescriptor(
	String taskName,
	ExecutionAttemptID eid,
	Class<? extends AbstractInvokable> abstractInvokable,
	int maxNumberOfSubtasks,
	List<ResultPartitionDeploymentDescriptor> producedPartitions,
	List<InputGateDeploymentDescriptor> inputGates
) throws IOException {
	Preconditions.checkNotNull(inputGates);
	Preconditions.checkNotNull(producedPartitions);
	// Serialize a default execution config for the deployment descriptor.
	final SerializedValue<ExecutionConfig> serializedExecutionConfig = new SerializedValue<>(new ExecutionConfig());
	// NOTE(review): the numeric literals follow createTaskDeploymentDescriptor's
	// positional parameters — confirm against its signature before reordering.
	return createTaskDeploymentDescriptor(
		jobId, testName.getMethodName(), eid,
		serializedExecutionConfig, taskName, maxNumberOfSubtasks, 0, 1, 0,
		new Configuration(), new Configuration(), abstractInvokable.getName(),
		producedPartitions,
		inputGates,
		Collections.emptyList(),
		Collections.emptyList(),
		0);
}
 
Example #5
Source File: ConsumableNotifyingResultPartitionWriterDecorator.java    From flink with Apache License 2.0 6 votes vote down vote up
public static ResultPartitionWriter[] decorate(
		Collection<ResultPartitionDeploymentDescriptor> descs,
		ResultPartitionWriter[] partitionWriters,
		TaskActions taskActions,
		JobID jobId,
		ResultPartitionConsumableNotifier notifier) {

	// Descriptors and writers are matched up positionally by iteration order.
	// Pipelined partitions that must announce consumability get wrapped in the
	// notifying decorator; all other writers are passed through untouched.
	final ResultPartitionWriter[] decoratedWriters = new ResultPartitionWriter[partitionWriters.length];
	int index = 0;
	for (ResultPartitionDeploymentDescriptor descriptor : descs) {
		final ResultPartitionWriter writer = partitionWriters[index];
		final boolean needsNotification =
			descriptor.sendScheduleOrUpdateConsumersMessage() && descriptor.getPartitionType().isPipelined();
		decoratedWriters[index] = needsNotification
			? new ConsumableNotifyingResultPartitionWriterDecorator(taskActions, jobId, writer, notifier)
			: writer;
		index++;
	}
	return decoratedWriters;
}
 
Example #6
Source File: PartitionTrackerImpl.java    From flink with Apache License 2.0 6 votes vote down vote up
@Override
public void stopTrackingAndReleasePartitions(Collection<ResultPartitionID> resultPartitionIds) {
	Preconditions.checkNotNull(resultPartitionIds);

	// Stop tracking every given partition; for each one that was actually
	// tracked, group its deployment descriptor by the producing task executor
	// and release each group with a single call.
	resultPartitionIds.stream()
		.map(this::internalStopTrackingPartition)
		.filter(Optional::isPresent)
		.map(Optional::get)
		.collect(Collectors.groupingBy(
			metaData -> metaData.producingTaskExecutorResourceId,
			Collectors.mapping(
				metaData -> metaData.resultPartitionDeploymentDescriptor,
				toList())))
		.forEach(this::internalReleasePartitions);
}
 
Example #7
Source File: TaskExecutorSubmissionTest.java    From flink with Apache License 2.0 6 votes vote down vote up
private TaskDeploymentDescriptor createSender(
		NettyShuffleDescriptor shuffleDescriptor,
		Class<? extends AbstractInvokable> abstractInvokable) throws IOException {
	// Build the produced-partition metadata around the id carried by the
	// given shuffle descriptor.
	final ResultPartitionID resultPartitionID = shuffleDescriptor.getResultPartitionID();
	final PartitionDescriptor senderPartition = new PartitionDescriptor(
		new IntermediateDataSetID(),
		resultPartitionID.getPartitionId(),
		ResultPartitionType.PIPELINED,
		1,
		0);
	final ResultPartitionDeploymentDescriptor producedPartition = new ResultPartitionDeploymentDescriptor(
		senderPartition,
		shuffleDescriptor,
		1,
		true);
	// The sender task produces exactly one partition and has no input gates.
	return createTestTaskDeploymentDescriptor(
		"Sender",
		resultPartitionID.getProducerId(),
		abstractInvokable,
		1,
		Collections.singletonList(producedPartition),
		Collections.emptyList());
}
 
Example #8
Source File: TaskExecutorSubmissionTest.java    From flink with Apache License 2.0 6 votes vote down vote up
private TaskDeploymentDescriptor createTestTaskDeploymentDescriptor(
	String taskName,
	ExecutionAttemptID eid,
	Class<? extends AbstractInvokable> abstractInvokable,
	int maxNumberOfSubtasks,
	Collection<ResultPartitionDeploymentDescriptor> producedPartitions,
	Collection<InputGateDeploymentDescriptor> inputGates
) throws IOException {
	Preconditions.checkNotNull(producedPartitions);
	Preconditions.checkNotNull(inputGates);
	// The deployment runs with a freshly serialized default ExecutionConfig.
	final SerializedValue<ExecutionConfig> executionConfig = new SerializedValue<>(new ExecutionConfig());
	// NOTE(review): the in-line numeric arguments are positional — verify
	// against createTaskDeploymentDescriptor's signature before changing them.
	return createTaskDeploymentDescriptor(
		jobId, testName.getMethodName(), eid,
		executionConfig, taskName, maxNumberOfSubtasks, 0, 1, 0,
		new Configuration(), new Configuration(), abstractInvokable.getName(),
		producedPartitions,
		inputGates,
		Collections.emptyList(),
		Collections.emptyList(),
		0);
}
 
Example #9
Source File: JobMasterPartitionTrackerImpl.java    From flink with Apache License 2.0 6 votes vote down vote up
@Override
public void stopTrackingAndReleasePartitions(Collection<ResultPartitionID> resultPartitionIds) {
	Preconditions.checkNotNull(resultPartitionIds);

	// Stop tracking all given partitions, group the removed entries' meta
	// info by producing task executor, and release each group in one call.
	stopTrackingPartitions(resultPartitionIds)
		.stream()
		.collect(Collectors.groupingBy(
			PartitionTrackerEntry::getKey,
			Collectors.mapping(
				PartitionTrackerEntry::getMetaInfo,
				toList())))
		.forEach(this::internalReleasePartitions);
}
 
Example #10
Source File: NettyShuffleEnvironment.java    From flink with Apache License 2.0 6 votes vote down vote up
@Override
public List<ResultPartition> createResultPartitionWriters(
		ShuffleIOOwnerContext ownerContext,
		List<ResultPartitionDeploymentDescriptor> resultPartitionDeploymentDescriptors) {
	synchronized (lock) {
		Preconditions.checkState(!isClosed, "The NettyShuffleEnvironment has already been shut down.");

		// Create one partition per deployment descriptor, preserving list order.
		final int partitionCount = resultPartitionDeploymentDescriptors.size();
		final ResultPartition[] partitions = new ResultPartition[partitionCount];
		for (int i = 0; i < partitionCount; i++) {
			partitions[i] = resultPartitionFactory.create(
				ownerContext.getOwnerName(),
				i,
				resultPartitionDeploymentDescriptors.get(i));
		}

		registerOutputMetrics(config.isNetworkDetailedMetrics(), ownerContext.getOutputGroup(), partitions);
		return Arrays.asList(partitions);
	}
}
 
Example #11
Source File: Execution.java    From flink with Apache License 2.0 6 votes vote down vote up
/**
 * Releases all pipelined result partitions produced by this execution.
 *
 * <p>Each pipelined partition is first released on the shuffle master (via the
 * {@code peek} side effect while the ids are collected) and then, if any were
 * found, released on the hosting task manager with a single RPC.
 */
private void sendReleaseIntermediateResultPartitionsRpcCall() {
	LOG.info("Discarding the results produced by task execution {}.", attemptId);
	final LogicalSlot slot = assignedResource;

	// Without an assigned slot nothing was deployed, so there is nothing to release.
	if (slot != null) {
		final TaskManagerGateway taskManagerGateway = slot.getTaskManagerGateway();

		final ShuffleMaster<?> shuffleMaster = getVertex().getExecutionGraph().getShuffleMaster();

		// NOTE(review): peek is used for its side effect — every pipelined
		// partition's shuffle descriptor is released externally while the
		// partition ids are being gathered into the set.
		Set<ResultPartitionID> partitionIds = producedPartitions.values().stream()
			.filter(resultPartitionDeploymentDescriptor -> resultPartitionDeploymentDescriptor.getPartitionType().isPipelined())
			.map(ResultPartitionDeploymentDescriptor::getShuffleDescriptor)
			.peek(shuffleMaster::releasePartitionExternally)
			.map(ShuffleDescriptor::getResultPartitionID)
			.collect(Collectors.toSet());

		if (!partitionIds.isEmpty()) {
			// TODO For some tests this could be a problem when querying too early if all resources were released
			taskManagerGateway.releasePartitions(getVertex().getJobId(), partitionIds);
		}
	}
}
 
Example #12
Source File: ConsumableNotifyingResultPartitionWriterDecorator.java    From flink with Apache License 2.0 6 votes vote down vote up
/**
 * Wraps the given partition writers in consumable-notifying decorators where needed.
 *
 * <p>Descriptors and writers are matched up positionally: the i-th descriptor in
 * iteration order corresponds to {@code partitionWriters[i]}. A writer is only
 * decorated when its partition is pipelined and must send the
 * schedule-or-update-consumers message; otherwise it is passed through unchanged.
 */
public static ResultPartitionWriter[] decorate(
		Collection<ResultPartitionDeploymentDescriptor> descs,
		ResultPartitionWriter[] partitionWriters,
		TaskActions taskActions,
		JobID jobId,
		ResultPartitionConsumableNotifier notifier) {

	ResultPartitionWriter[] consumableNotifyingPartitionWriters = new ResultPartitionWriter[partitionWriters.length];
	int counter = 0;
	for (ResultPartitionDeploymentDescriptor desc : descs) {
		if (desc.sendScheduleOrUpdateConsumersMessage() && desc.getPartitionType().isPipelined()) {
			consumableNotifyingPartitionWriters[counter] = new ConsumableNotifyingResultPartitionWriterDecorator(
				taskActions,
				jobId,
				partitionWriters[counter],
				notifier);
		} else {
			consumableNotifyingPartitionWriters[counter] = partitionWriters[counter];
		}
		counter++;
	}
	return consumableNotifyingPartitionWriters;
}
 
Example #13
Source File: NettyShuffleEnvironment.java    From flink with Apache License 2.0 6 votes vote down vote up
@Override
public Collection<ResultPartition> createResultPartitionWriters(
		ShuffleIOOwnerContext ownerContext,
		Collection<ResultPartitionDeploymentDescriptor> resultPartitionDeploymentDescriptors) {
	synchronized (lock) {
		Preconditions.checkState(!isClosed, "The NettyShuffleEnvironment has already been shut down.");

		// Materialize one ResultPartition per descriptor, in iteration order.
		final ResultPartition[] partitions = new ResultPartition[resultPartitionDeploymentDescriptors.size()];
		int index = 0;
		for (ResultPartitionDeploymentDescriptor descriptor : resultPartitionDeploymentDescriptors) {
			partitions[index] = resultPartitionFactory.create(ownerContext.getOwnerName(), descriptor);
			index++;
		}

		registerOutputMetrics(config.isNetworkDetailedMetrics(), ownerContext.getOutputGroup(), partitions);
		return Arrays.asList(partitions);
	}
}
 
Example #14
Source File: ResultPartitionFactoryTest.java    From flink with Apache License 2.0 5 votes vote down vote up
private static ResultPartition createResultPartition(
		boolean releasePartitionOnConsumption,
		ResultPartitionType partitionType) {
	// Factory wired with single-slot pools and a one-segment network buffer pool.
	final ResultPartitionFactory partitionFactory = new ResultPartitionFactory(
		new ResultPartitionManager(),
		fileChannelManager,
		new NetworkBufferPool(1, SEGMENT_SIZE, 1),
		BoundedBlockingSubpartitionType.AUTO,
		1,
		1,
		SEGMENT_SIZE,
		releasePartitionOnConsumption);

	// Deployment descriptor for a single-subpartition partition of the requested type.
	final PartitionDescriptor partitionDescriptor = new PartitionDescriptor(
		new IntermediateDataSetID(),
		new IntermediateResultPartitionID(),
		partitionType,
		1,
		0);
	final ResultPartitionDeploymentDescriptor deploymentDescriptor = new ResultPartitionDeploymentDescriptor(
		partitionDescriptor,
		NettyShuffleDescriptorBuilder.newBuilder().buildLocal(),
		1,
		true);

	return partitionFactory.create("test", deploymentDescriptor);
}
 
Example #15
Source File: TaskExecutor.java    From flink with Apache License 2.0 5 votes vote down vote up
private Stream<ResultPartitionDeploymentDescriptor> filterPartitionsRequiringRelease(Collection<ResultPartitionDeploymentDescriptor> producedResultPartitions) {
	// Only blocking partitions need an explicit release call, and only when
	// they actually store local resources on this TaskExecutor.
	return producedResultPartitions.stream()
		.filter(descriptor -> descriptor.getPartitionType().isBlocking()
			&& descriptor.getShuffleDescriptor().storesLocalResourcesOn().isPresent());
}
 
Example #16
Source File: PartitionTrackerImplTest.java    From flink with Apache License 2.0 5 votes vote down vote up
private static ResultPartitionDeploymentDescriptor createResultPartitionDeploymentDescriptor(
	ResultPartitionID resultPartitionId,
	ResultPartitionType type,
	boolean hasLocalResources) {

	// Minimal shuffle descriptor stub: echoes the given partition id and, when
	// requested, claims local resources on a freshly generated resource id.
	final ShuffleDescriptor shuffleDescriptor = new ShuffleDescriptor() {
		@Override
		public ResultPartitionID getResultPartitionID() {
			return resultPartitionId;
		}

		@Override
		public Optional<ResourceID> storesLocalResourcesOn() {
			if (hasLocalResources) {
				return Optional.of(ResourceID.generate());
			}
			return Optional.empty();
		}
	};

	return new ResultPartitionDeploymentDescriptor(
		new PartitionDescriptor(
			new IntermediateDataSetID(),
			resultPartitionId.getPartitionId(),
			type,
			1,
			0),
		shuffleDescriptor,
		1,
		true);
}
 
Example #17
Source File: ExecutionVertexDeploymentTest.java    From flink with Apache License 2.0 5 votes vote down vote up
/**
 * Tests that the lazy scheduling flag is correctly forwarded to the produced partition descriptors.
 *
 * <p>For every {@link ScheduleMode}, builds a one-vertex execution graph with a
 * single pipelined result, creates a task deployment descriptor from it, and
 * asserts that the produced partition's send-schedule-or-update-consumers flag
 * matches {@code scheduleMode.allowLazyDeployment()}.
 */
@Test
public void testTddProducedPartitionsLazyScheduling() throws Exception {
	for (ScheduleMode scheduleMode: ScheduleMode.values()) {
		ExecutionJobVertex jobVertex = getExecutionVertex(
			new JobVertexID(),
			new DirectScheduledExecutorService(),
			scheduleMode);

		// Single pipelined intermediate result with parallelism 1.
		IntermediateResult result =
			new IntermediateResult(new IntermediateDataSetID(), jobVertex, 1, ResultPartitionType.PIPELINED);

		ExecutionAttemptID attemptID = new ExecutionAttemptID();
		ExecutionVertex vertex =
			new ExecutionVertex(jobVertex, 0, new IntermediateResult[]{result}, Time.minutes(1));
		TaskDeploymentDescriptorFactory tddFactory =
			TaskDeploymentDescriptorFactory.fromExecutionVertex(vertex, 1);

		// Wire one consumer so the partition is actually produced for someone.
		ExecutionEdge mockEdge = createMockExecutionEdge(1);

		result.getPartitions()[0].addConsumerGroup();
		result.getPartitions()[0].addConsumer(mockEdge, 0);

		TaskManagerLocation location =
			new TaskManagerLocation(ResourceID.generate(), InetAddress.getLoopbackAddress(), 1);

		TaskDeploymentDescriptor tdd = tddFactory.createDeploymentDescriptor(
			new AllocationID(),
			0,
			null,
			Execution.registerProducedPartitions(vertex, location, attemptID).get().values());

		Collection<ResultPartitionDeploymentDescriptor> producedPartitions = tdd.getProducedPartitions();

		// Exactly one produced partition whose lazy-scheduling flag mirrors the mode.
		assertEquals(1, producedPartitions.size());
		ResultPartitionDeploymentDescriptor desc = producedPartitions.iterator().next();
		assertEquals(scheduleMode.allowLazyDeployment(), desc.sendScheduleOrUpdateConsumersMessage());
	}
}
 
Example #18
Source File: TaskTest.java    From flink with Apache License 2.0 5 votes vote down vote up
private void testExecutionFailsInNetworkRegistration(
		Collection<ResultPartitionDeploymentDescriptor> resultPartitions,
		Collection<InputGateDeploymentDescriptor> inputGates) throws Exception {
	final String errorMessage = "Network buffer pool has already been destroyed.";

	final QueuedNoOpTaskManagerActions taskManagerActions = new QueuedNoOpTaskManagerActions();
	final ResultPartitionConsumableNotifier consumableNotifier = new NoOpResultPartitionConsumableNotifier();
	final PartitionProducerStateChecker partitionProducerStateChecker = mock(PartitionProducerStateChecker.class);

	final Task task = new TestTaskBuilder(shuffleEnvironment)
		.setTaskManagerActions(taskManagerActions)
		.setConsumableNotifier(consumableNotifier)
		.setPartitionProducerStateChecker(partitionProducerStateChecker)
		.setResultPartitions(resultPartitions)
		.setInputGates(inputGates)
		.build();

	// Close the shuffle environment up front so the task's network
	// registration is guaranteed to fail once it runs.
	shuffleEnvironment.close();

	task.run();

	// The task must end up FAILED with the destroyed-pool cause, and the
	// task manager must have been told about that final state.
	assertEquals(ExecutionState.FAILED, task.getExecutionState());
	assertTrue(task.isCanceledOrFailed());
	assertTrue(task.getFailureCause().getMessage().contains(errorMessage));

	taskManagerActions.validateListenerMessage(ExecutionState.FAILED, task, new IllegalStateException(errorMessage));
}
 
Example #19
Source File: ResultPartitionFactory.java    From flink with Apache License 2.0 5 votes vote down vote up
public ResultPartition create(
		String taskNameWithSubtaskAndId,
		ResultPartitionDeploymentDescriptor desc) {
	// Unpack the deployment descriptor once and delegate to the detailed
	// factory method; the buffer pool factory is derived from the same
	// subpartition count and partition type.
	final ResultPartitionType partitionType = desc.getPartitionType();
	final int numberOfSubpartitions = desc.getNumberOfSubpartitions();
	return create(
		taskNameWithSubtaskAndId,
		desc.getShuffleDescriptor().getResultPartitionID(),
		partitionType,
		numberOfSubpartitions,
		desc.getMaxParallelism(),
		createBufferPoolFactory(numberOfSubpartitions, partitionType));
}
 
Example #20
Source File: ExecutionPartitionLifecycleTest.java    From flink with Apache License 2.0 5 votes vote down vote up
/**
 * Drives an execution through {@code stateTransition} and verifies the
 * partition tracker interactions it causes.
 *
 * <p>First asserts that deploying a BLOCKING partition registered it for the
 * expected task executor, then — depending on {@code partitionReleaseResult} —
 * checks which (if any) of the stop-tracking futures completed and that the
 * correct partition id was passed.
 */
private void testPartitionTrackingForStateTransition(final Consumer<Execution> stateTransition, final PartitionReleaseResult partitionReleaseResult) throws Exception {
	CompletableFuture<Tuple2<ResourceID, ResultPartitionDeploymentDescriptor>> partitionStartTrackingFuture = new CompletableFuture<>();
	CompletableFuture<Collection<ResultPartitionID>> partitionStopTrackingFuture = new CompletableFuture<>();
	CompletableFuture<Collection<ResultPartitionID>> partitionStopTrackingAndReleaseFuture = new CompletableFuture<>();
	final TestingJobMasterPartitionTracker partitionTracker = new TestingJobMasterPartitionTracker();
	// Capture each tracker callback into a future so the test can await it.
	partitionTracker.setStartTrackingPartitionsConsumer(
		(resourceID, resultPartitionDeploymentDescriptor) ->
			partitionStartTrackingFuture.complete(Tuple2.of(resourceID, resultPartitionDeploymentDescriptor))
	);
	partitionTracker.setStopTrackingPartitionsConsumer(partitionStopTrackingFuture::complete);
	partitionTracker.setStopTrackingAndReleasePartitionsConsumer(partitionStopTrackingAndReleaseFuture::complete);

	setupExecutionGraphAndStartRunningJob(ResultPartitionType.BLOCKING, partitionTracker, new SimpleAckingTaskManagerGateway(), NettyShuffleMaster.INSTANCE);

	// Deployment must have started tracking the descriptor on our task executor.
	Tuple2<ResourceID, ResultPartitionDeploymentDescriptor> startTrackingCall = partitionStartTrackingFuture.get();
	assertThat(startTrackingCall.f0, equalTo(taskExecutorResourceId));
	assertThat(startTrackingCall.f1, equalTo(descriptor));

	stateTransition.accept(execution);

	switch (partitionReleaseResult) {
		case NONE:
			assertFalse(partitionStopTrackingFuture.isDone());
			assertFalse(partitionStopTrackingAndReleaseFuture.isDone());
			break;
		case STOP_TRACKING:
			assertTrue(partitionStopTrackingFuture.isDone());
			assertFalse(partitionStopTrackingAndReleaseFuture.isDone());
			final Collection<ResultPartitionID> stopTrackingCall = partitionStopTrackingFuture.get();
			assertEquals(Collections.singletonList(descriptor.getShuffleDescriptor().getResultPartitionID()), stopTrackingCall);
			break;
		case STOP_TRACKING_AND_RELEASE:
			assertFalse(partitionStopTrackingFuture.isDone());
			assertTrue(partitionStopTrackingAndReleaseFuture.isDone());
			final Collection<ResultPartitionID> stopTrackingAndReleaseCall = partitionStopTrackingAndReleaseFuture.get();
			assertEquals(Collections.singletonList(descriptor.getShuffleDescriptor().getResultPartitionID()), stopTrackingAndReleaseCall);
			break;
	}
}
 
Example #21
Source File: TaskExecutor.java    From flink with Apache License 2.0 5 votes vote down vote up
/**
 * Starts tracking the produced partitions that need an explicit release and
 * arranges for cleanup bookkeeping once the task terminates.
 *
 * <p>Partitions requiring release (as decided by
 * {@code filterPartitionsRequiringRelease}) are registered with the partition
 * tracker; when the task terminates in any state other than FINISHED their
 * tracking is stopped on the main thread. The resulting future is stored per
 * job so job-level teardown can await all task cleanups.
 */
private void setupResultPartitionBookkeeping(
		JobID jobId,
		Collection<ResultPartitionDeploymentDescriptor> producedResultPartitions,
		CompletableFuture<ExecutionState> terminationFuture) {
	// NOTE(review): peek is used for its side effect — each qualifying
	// partition is registered with the tracker while its id is collected.
	final Set<ResultPartitionID> partitionsRequiringRelease = filterPartitionsRequiringRelease(producedResultPartitions)
		.peek(rpdd -> partitionTracker.startTrackingPartition(jobId, TaskExecutorPartitionInfo.from(rpdd)))
		.map(ResultPartitionDeploymentDescriptor::getShuffleDescriptor)
		.map(ShuffleDescriptor::getResultPartitionID)
		.collect(Collectors.toSet());

	final CompletableFuture<ExecutionState> taskTerminationWithResourceCleanupFuture =
		terminationFuture.thenApplyAsync(
			executionState -> {
				// A FINISHED task keeps its partitions tracked (they may still
				// be served); any other terminal state drops the tracking.
				if (executionState != ExecutionState.FINISHED) {
					partitionTracker.stopTrackingPartitions(partitionsRequiringRelease);
				}
				return executionState;
			},
			getMainThreadExecutor());

	// Append this task's cleanup future to the job's list, creating it lazily.
	taskResultPartitionCleanupFuturesPerJob.compute(
		jobId,
		(ignored, completableFutures) -> {
			if (completableFutures == null) {
				completableFutures = new ArrayList<>(4);
			}

			completableFutures.add(taskTerminationWithResourceCleanupFuture);
			return completableFutures;
		});
}
 
Example #22
Source File: ExecutionPartitionLifecycleTest.java    From flink with Apache License 2.0 5 votes vote down vote up
/**
 * Drives an execution through {@code stateTransition} and verifies the
 * partition tracker interactions it causes.
 *
 * <p>First asserts that deploying a BLOCKING partition registered it for the
 * expected task executor, then — depending on {@code partitionReleaseResult} —
 * checks which (if any) of the stop-tracking futures completed and that the
 * correct partition id was passed.
 */
private void testPartitionTrackingForStateTransition(final Consumer<Execution> stateTransition, final PartitionReleaseResult partitionReleaseResult) throws Exception {
	CompletableFuture<Tuple2<ResourceID, ResultPartitionDeploymentDescriptor>> partitionStartTrackingFuture = new CompletableFuture<>();
	CompletableFuture<Collection<ResultPartitionID>> partitionStopTrackingFuture = new CompletableFuture<>();
	CompletableFuture<Collection<ResultPartitionID>> partitionStopTrackingAndReleaseFuture = new CompletableFuture<>();
	final TestingPartitionTracker partitionTracker = new TestingPartitionTracker();
	// Capture each tracker callback into a future so the test can await it.
	partitionTracker.setStartTrackingPartitionsConsumer(
		(resourceID, resultPartitionDeploymentDescriptor) ->
			partitionStartTrackingFuture.complete(Tuple2.of(resourceID, resultPartitionDeploymentDescriptor))
	);
	partitionTracker.setStopTrackingPartitionsConsumer(partitionStopTrackingFuture::complete);
	partitionTracker.setStopTrackingAndReleasePartitionsConsumer(partitionStopTrackingAndReleaseFuture::complete);

	setupExecutionGraphAndStartRunningJob(ResultPartitionType.BLOCKING, partitionTracker, new SimpleAckingTaskManagerGateway(), NettyShuffleMaster.INSTANCE);

	// Deployment must have started tracking the descriptor on our task executor.
	Tuple2<ResourceID, ResultPartitionDeploymentDescriptor> startTrackingCall = partitionStartTrackingFuture.get();
	assertThat(startTrackingCall.f0, equalTo(taskExecutorResourceId));
	assertThat(startTrackingCall.f1, equalTo(descriptor));

	stateTransition.accept(execution);

	switch (partitionReleaseResult) {
		case NONE:
			assertFalse(partitionStopTrackingFuture.isDone());
			assertFalse(partitionStopTrackingAndReleaseFuture.isDone());
			break;
		case STOP_TRACKING:
			assertTrue(partitionStopTrackingFuture.isDone());
			assertFalse(partitionStopTrackingAndReleaseFuture.isDone());
			final Collection<ResultPartitionID> stopTrackingCall = partitionStopTrackingFuture.get();
			assertEquals(Collections.singletonList(descriptor.getShuffleDescriptor().getResultPartitionID()), stopTrackingCall);
			break;
		case STOP_TRACKING_AND_RELEASE:
			assertFalse(partitionStopTrackingFuture.isDone());
			assertTrue(partitionStopTrackingAndReleaseFuture.isDone());
			final Collection<ResultPartitionID> stopTrackingAndReleaseCall = partitionStopTrackingAndReleaseFuture.get();
			assertEquals(Collections.singletonList(descriptor.getShuffleDescriptor().getResultPartitionID()), stopTrackingAndReleaseCall);
			break;
	}
}
 
Example #23
Source File: Execution.java    From flink with Apache License 2.0 5 votes vote down vote up
private void startTrackingPartitions(final ResourceID taskExecutorId, final Collection<ResultPartitionDeploymentDescriptor> partitions) {
	// Register every produced partition with the job's partition tracker,
	// attributing it to the producing task executor.
	final JobMasterPartitionTracker tracker = vertex.getExecutionGraph().getPartitionTracker();
	partitions.forEach(partition -> tracker.startTrackingPartition(taskExecutorId, partition));
}
 
Example #24
Source File: JobMasterPartitionTrackerImpl.java    From flink with Apache License 2.0 5 votes vote down vote up
@Override
public void stopTrackingAndReleasePartitionsFor(ResourceID producingTaskExecutorId) {
	Preconditions.checkNotNull(producingTaskExecutorId);

	// Collect the deployment descriptors of all partitions tracked for this
	// task executor (renamed from the misleading "resultPartitionIds"), then
	// release them in one pass.
	final Collection<ResultPartitionDeploymentDescriptor> trackedPartitions =
		CollectionUtil.project(stopTrackingPartitionsFor(producingTaskExecutorId), PartitionTrackerEntry::getMetaInfo);

	internalReleasePartitions(producingTaskExecutorId, trackedPartitions);
}
 
Example #25
Source File: JobMasterPartitionTrackerImpl.java    From flink with Apache License 2.0 5 votes vote down vote up
@Override
public void stopTrackingAndReleaseOrPromotePartitionsFor(ResourceID producingTaskExecutorId) {
	Preconditions.checkNotNull(producingTaskExecutorId);

	// Collect the deployment descriptors of all partitions tracked for this
	// task executor (renamed from the misleading "resultPartitionIds"), then
	// release or promote each of them.
	final Collection<ResultPartitionDeploymentDescriptor> trackedPartitions =
		CollectionUtil.project(stopTrackingPartitionsFor(producingTaskExecutorId), PartitionTrackerEntry::getMetaInfo);

	internalReleaseOrPromotePartitions(producingTaskExecutorId, trackedPartitions);
}
 
Example #26
Source File: JobMasterPartitionTrackerImpl.java    From flink with Apache License 2.0 5 votes vote down vote up
/**
 * Releases the given partitions both on the task executor that may host them
 * and on the shuffle master.
 */
private void internalReleasePartitions(
	ResourceID potentialPartitionLocation,
	Collection<ResultPartitionDeploymentDescriptor> partitionDeploymentDescriptors) {

	internalReleasePartitionsOnTaskExecutor(potentialPartitionLocation, partitionDeploymentDescriptors);
	internalReleasePartitionsOnShuffleMaster(partitionDeploymentDescriptors.stream());
}
 
Example #27
Source File: JobMasterPartitionTrackerImpl.java    From flink with Apache License 2.0 5 votes vote down vote up
/**
 * Releases or promotes the given partitions on the task executor, and releases
 * them on the shuffle master — except persistent partitions, which are excluded
 * from the shuffle-master release.
 */
private void internalReleaseOrPromotePartitions(
	ResourceID potentialPartitionLocation,
	Collection<ResultPartitionDeploymentDescriptor> partitionDeploymentDescriptors) {

	internalReleaseOrPromotePartitionsOnTaskExecutor(potentialPartitionLocation, partitionDeploymentDescriptors);
	internalReleasePartitionsOnShuffleMaster(excludePersistentPartitions(partitionDeploymentDescriptors));
}
 
Example #28
Source File: JobMasterPartitionTrackerImpl.java    From flink with Apache License 2.0 5 votes vote down vote up
private void internalReleasePartitionsOnTaskExecutor(
	ResourceID potentialPartitionLocation,
	Collection<ResultPartitionDeploymentDescriptor> partitionDeploymentDescriptors) {

	// Only partitions that occupy local resources on the task executor need an
	// RPC release call; nothing is promoted on this code path (empty promote set).
	final Set<ResultPartitionID> partitionsRequiringRpcReleaseCalls = partitionDeploymentDescriptors.stream()
		.filter(descriptor -> isPartitionWithLocalResources(descriptor))
		.map(descriptor -> getResultPartitionId(descriptor))
		.collect(Collectors.toSet());

	internalReleaseOrPromotePartitionsOnTaskExecutor(
		potentialPartitionLocation,
		partitionsRequiringRpcReleaseCalls,
		Collections.emptySet()
	);
}
 
Example #29
Source File: TaskTest.java    From flink with Apache License 2.0 5 votes vote down vote up
@Test
public void testExecutionFailsInNetworkRegistrationForPartitions() throws Exception {
	// One dummy partition built from default builders is enough to exercise
	// the produced-partition registration path; no input gates are supplied.
	final ResultPartitionDeploymentDescriptor dummyPartition = new ResultPartitionDeploymentDescriptor(
		PartitionDescriptorBuilder.newBuilder().build(),
		NettyShuffleDescriptorBuilder.newBuilder().buildLocal(),
		1,
		false);
	testExecutionFailsInNetworkRegistration(Collections.singletonList(dummyPartition), Collections.emptyList());
}
 
Example #30
Source File: TaskTest.java    From flink with Apache License 2.0 5 votes vote down vote up
/**
 * Runs a task whose shuffle environment has been shut down and verifies that
 * it fails with the destroyed-network-buffer-pool error.
 *
 * <p>The shuffle environment is closed before {@code task.run()}, so the
 * task's network registration of the given partitions/gates must fail; the
 * test then checks the FAILED state, the failure cause message, and the
 * listener notification sent to the task manager.
 */
private void testExecutionFailsInNetworkRegistration(
		List<ResultPartitionDeploymentDescriptor> resultPartitions,
		List<InputGateDeploymentDescriptor> inputGates) throws Exception {
	final String errorMessage = "Network buffer pool has already been destroyed.";

	final ResultPartitionConsumableNotifier consumableNotifier = new NoOpResultPartitionConsumableNotifier();
	final PartitionProducerStateChecker partitionProducerStateChecker = mock(PartitionProducerStateChecker.class);

	final QueuedNoOpTaskManagerActions taskManagerActions = new QueuedNoOpTaskManagerActions();
	final Task task = new TestTaskBuilder(shuffleEnvironment)
		.setTaskManagerActions(taskManagerActions)
		.setConsumableNotifier(consumableNotifier)
		.setPartitionProducerStateChecker(partitionProducerStateChecker)
		.setResultPartitions(resultPartitions)
		.setInputGates(inputGates)
		.build();

	// shut down the network to make the following task registration failure
	shuffleEnvironment.close();

	// should fail
	task.run();

	// verify final state
	assertEquals(ExecutionState.FAILED, task.getExecutionState());
	assertTrue(task.isCanceledOrFailed());
	assertTrue(task.getFailureCause().getMessage().contains(errorMessage));

	taskManagerActions.validateListenerMessage(ExecutionState.FAILED, task, new IllegalStateException(errorMessage));
}