Java Code Examples for org.apache.flink.runtime.executiongraph.ExecutionVertex#getJobvertexId()

The following examples show how to use org.apache.flink.runtime.executiongraph.ExecutionVertex#getJobvertexId(). Each example lists its original source file and project. A minimal sketch of the recurring usage pattern is shown first, followed by the examples themselves.
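Across these examples, getJobvertexId() is most often paired with getParallelSubtaskIndex() to build an ExecutionVertexID. The sketch below illustrates that pattern; the helper name toExecutionVertexId is illustrative only, and the package containing ExecutionVertexID may differ between Flink versions.

// Illustrative sketch only: pairs the job vertex ID (which operator) with the
// subtask index (which parallel instance) to form an ExecutionVertexID.
static ExecutionVertexID toExecutionVertexId(ExecutionVertex vertex) {
	return new ExecutionVertexID(vertex.getJobvertexId(), vertex.getParallelSubtaskIndex());
}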
Example 1
Source File: DefaultFailoverTopology.java    From flink with Apache License 2.0
public DefaultFailoverTopology(ExecutionGraph executionGraph) {
	checkNotNull(executionGraph);

	this.containsCoLocationConstraints = executionGraph.getAllVertices().values().stream()
		.map(ExecutionJobVertex::getCoLocationGroup)
		.anyMatch(Objects::nonNull);

	// generate vertices
	this.failoverVertices = new ArrayList<>();
	final Map<ExecutionVertex, DefaultFailoverVertex> failoverVertexMap = new IdentityHashMap<>();
	for (ExecutionVertex vertex : executionGraph.getAllExecutionVertices()) {
		final DefaultFailoverVertex failoverVertex = new DefaultFailoverVertex(
			new ExecutionVertexID(vertex.getJobvertexId(), vertex.getParallelSubtaskIndex()),
			vertex.getTaskNameWithSubtaskIndex());
		this.failoverVertices.add(failoverVertex);
		failoverVertexMap.put(vertex, failoverVertex);
	}

	// generate edges
	connectVerticesWithEdges(failoverVertexMap);
}
 
Example 2
Source File: ExecutionGraphToSchedulingTopologyAdapter.java    From flink with Apache License 2.0
private static DefaultSchedulingExecutionVertex generateSchedulingExecutionVertex(
	ExecutionVertex vertex,
	List<DefaultSchedulingResultPartition> producedPartitions) {

	DefaultSchedulingExecutionVertex schedulingVertex = new DefaultSchedulingExecutionVertex(
		new ExecutionVertexID(vertex.getJobvertexId(), vertex.getParallelSubtaskIndex()),
		producedPartitions,
		new ExecutionStateSupplier(vertex),
		vertex.getInputDependencyConstraint());

	producedPartitions.forEach(partition -> partition.setProducer(schedulingVertex));

	return schedulingVertex;
}
 
Example 3
Source File: AdaptedRestartPipelinedRegionStrategyNG.java    From flink with Apache License 2.0
private Map<JobVertexID, ExecutionJobVertex> getInvolvedExecutionJobVertices(
	final Set<ExecutionVertex> executionVertices) {

	Map<JobVertexID, ExecutionJobVertex> tasks = new HashMap<>();
	for (ExecutionVertex executionVertex : executionVertices) {
		JobVertexID jobvertexId = executionVertex.getJobvertexId();
		ExecutionJobVertex jobVertex = executionVertex.getJobVertex();
		tasks.putIfAbsent(jobvertexId, jobVertex);
	}
	return tasks;
}
 
Example 4
Source File: RestartPipelinedRegionStrategy.java    From flink with Apache License 2.0
@VisibleForTesting
protected Map<JobVertexID, ExecutionJobVertex> initTasks(List<ExecutionVertex> connectedExecutions) {
	Map<JobVertexID, ExecutionJobVertex> tasks = new HashMap<>(connectedExecutions.size());
	for (ExecutionVertex executionVertex : connectedExecutions) {
		JobVertexID jobvertexId = executionVertex.getJobvertexId();
		ExecutionJobVertex jobVertex = executionVertex.getJobVertex();
		tasks.putIfAbsent(jobvertexId, jobVertex);
	}
	return tasks;
}
 
Example 5
Source File: ExecutionGraphToSchedulingTopologyAdapterTest.java    From flink with Apache License 2.0
private static void assertPartitionsEquals(
	Collection<IntermediateResultPartition> originalPartitions,
	Collection<SchedulingResultPartition> adaptedPartitions) {

	assertEquals(originalPartitions.size(), adaptedPartitions.size());

	for (IntermediateResultPartition originalPartition : originalPartitions) {
		SchedulingResultPartition adaptedPartition = adaptedPartitions.stream()
			.filter(adapted -> adapted.getId().equals(originalPartition.getPartitionId()))
			.findAny()
			.orElseThrow(() -> new AssertionError("Could not find matching adapted partition for " + originalPartition));

		assertPartitionEquals(originalPartition, adaptedPartition);

		List<ExecutionVertex> originalConsumers = originalPartition.getConsumers().stream()
			.flatMap(Collection::stream)
			.map(ExecutionEdge::getTarget)
			.collect(Collectors.toList());
		Collection<SchedulingExecutionVertex> adaptedConsumers = adaptedPartition.getConsumers();

		for (ExecutionVertex originalConsumer : originalConsumers) {
			// it is sufficient to verify that some vertex exists with the correct ID here,
			// since deep equality is verified later in the main loop
		// this DOES rely on an implicit assumption that the vertex objects returned by the topology are
			// identical to those stored in the partition
			ExecutionVertexID originalId = new ExecutionVertexID(originalConsumer.getJobvertexId(), originalConsumer.getParallelSubtaskIndex());
			assertTrue(adaptedConsumers.stream().anyMatch(adaptedConsumer -> adaptedConsumer.getId().equals(originalId)));
		}
	}
}
 
Example 6
Source File: Scheduler.java    From Flink-CEPplus with Apache License 2.0
/**
 * Tries to allocate a new slot for a vertex that is part of a slot sharing group. If one
 * of the instances has a slot available, the method will allocate it as a shared slot, add that
 * shared slot to the sharing group, and allocate a simple slot from that shared slot.
 * 
 * <p>This method will try to allocate a slot from one of the local instances, and fall back to
 * non-local instances, if permitted.</p>
 * 
 * @param vertex The vertex to allocate the slot for.
 * @param requestedLocations The locations that are considered local. May be null or empty, if the
 *                           vertex has no location preferences.
 * @param groupAssignment The slot sharing group of the vertex. Mandatory parameter.
 * @param constraint The co-location constraint of the vertex. May be null.
 * @param localOnly Flag to indicate if non-local choices are acceptable.
 * 
 * @return A sub-slot for the given vertex, or {@code null}, if no slot is available.
 */
protected SimpleSlot getNewSlotForSharingGroup(ExecutionVertex vertex,
												Iterable<TaskManagerLocation> requestedLocations,
												SlotSharingGroupAssignment groupAssignment,
												CoLocationConstraint constraint,
												boolean localOnly)
{
	// we may need to loop multiple times, because there may be false positives
	// in the set-with-available-instances
	while (true) {
		Pair<Instance, Locality> instanceLocalityPair = findInstance(requestedLocations, localOnly);
		
		if (instanceLocalityPair == null) {
			// nothing is available
			return null;
		}

		final Instance instanceToUse = instanceLocalityPair.getLeft();
		final Locality locality = instanceLocalityPair.getRight();

		try {
			JobVertexID groupID = vertex.getJobvertexId();
			
			// allocate a shared slot from the instance
			SharedSlot sharedSlot = instanceToUse.allocateSharedSlot(groupAssignment);

			// if the instance has further available slots, re-add it to the set of available resources.
			if (instanceToUse.hasResourcesAvailable()) {
				this.instancesWithAvailableResources.put(instanceToUse.getTaskManagerID(), instanceToUse);
			}

			if (sharedSlot != null) {
				// add the shared slot to the assignment group and allocate a sub-slot
				SimpleSlot slot = constraint == null ?
						groupAssignment.addSharedSlotAndAllocateSubSlot(sharedSlot, locality, groupID) :
						groupAssignment.addSharedSlotAndAllocateSubSlot(sharedSlot, locality, constraint);

				if (slot != null) {
					return slot;
				}
				else {
					// could not add and allocate the sub-slot, so release shared slot
					sharedSlot.releaseSlot(new FlinkException("Could not allocate sub-slot."));
				}
			}
		}
		catch (InstanceDiedException e) {
		// the instance died, but this has not yet been propagated to this scheduler;
			// remove the instance from the set of available instances
			removeInstance(instanceToUse);
		}

		// if we failed to get a slot, fall through the loop
	}
}
 
Example 7
Source File: AdaptedRestartPipelinedRegionStrategyNG.java    From flink with Apache License 2.0
private ExecutionVertexID getExecutionVertexID(final ExecutionVertex vertex) {
	return new ExecutionVertexID(vertex.getJobvertexId(), vertex.getParallelSubtaskIndex());
}