org.apache.flink.runtime.deployment.InputGateDeploymentDescriptor Java Examples
The following examples show how to use org.apache.flink.runtime.deployment.InputGateDeploymentDescriptor. They are drawn from open-source projects; the source file and license are noted above each example.
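Before the examples, a quick orientation: an InputGateDeploymentDescriptor bundles the ID of the consumed intermediate result, the ResultPartitionType of the producer, the consumed subpartition index, and one ShuffleDescriptor per input channel; the examples read these back through getConsumedResultId(), getConsumedPartitionType(), getConsumedSubpartitionIndex(), and getShuffleDescriptors(). The minimal sketch below shows that shape in isolation. The import paths, the standalone class, and the use of UnknownShuffleDescriptor as a stand-in channel descriptor are assumptions for illustration rather than part of any single example.

import org.apache.flink.runtime.deployment.InputGateDeploymentDescriptor;
import org.apache.flink.runtime.executiongraph.ExecutionAttemptID;
import org.apache.flink.runtime.io.network.partition.ResultPartitionID;
import org.apache.flink.runtime.io.network.partition.ResultPartitionType;
import org.apache.flink.runtime.jobgraph.IntermediateDataSetID;
import org.apache.flink.runtime.jobgraph.IntermediateResultPartitionID;
import org.apache.flink.runtime.shuffle.ShuffleDescriptor;
import org.apache.flink.runtime.shuffle.UnknownShuffleDescriptor;

public class InputGateDeploymentDescriptorSketch {

    public static void main(String[] args) {
        // One ShuffleDescriptor per consumed partition; UnknownShuffleDescriptor is the
        // placeholder used when the producer's location is not yet known (see Example #28).
        ShuffleDescriptor[] channelDescriptors = new ShuffleDescriptor[] {
            new UnknownShuffleDescriptor(
                new ResultPartitionID(new IntermediateResultPartitionID(), new ExecutionAttemptID()))
        };

        // Constructor shape as used throughout the examples below:
        // (consumed result ID, partition type, consumed subpartition index, channel descriptors).
        InputGateDeploymentDescriptor igdd = new InputGateDeploymentDescriptor(
            new IntermediateDataSetID(),
            ResultPartitionType.PIPELINED,
            0,
            channelDescriptors);

        // The getters consumed by SingleInputGateFactory and NettyShuffleEnvironment below.
        System.out.println(igdd.getConsumedResultId());
        System.out.println(igdd.getConsumedPartitionType());
        System.out.println(igdd.getConsumedSubpartitionIndex());
        System.out.println(igdd.getShuffleDescriptors().length);
    }
}

In the Flink runtime these descriptors arrive at the TaskExecutor as part of a TaskDeploymentDescriptor, as Examples #5, #25, and #26 illustrate.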
Example #1
Source File: StreamNetworkBenchmarkEnvironment.java From flink with Apache License 2.0 | 6 votes |
private InputGate createInputGate(TaskManagerLocation senderLocation) throws Exception {
    InputGate[] gates = new InputGate[channels];
    for (int channel = 0; channel < channels; ++channel) {
        final InputGateDeploymentDescriptor gateDescriptor = createInputGateDeploymentDescriptor(
            senderLocation,
            channel,
            location);

        final InputGate gate = createInputGateWithMetrics(gateFactory, gateDescriptor, channel);
        gate.setup();
        gates[channel] = gate;
    }

    if (channels > 1) {
        return new UnionInputGate(gates);
    } else {
        return gates[0];
    }
}
Example #2
Source File: StreamNetworkBenchmarkEnvironment.java From flink with Apache License 2.0 | 6 votes |
private InputGateDeploymentDescriptor createInputGateDeploymentDescriptor(
        TaskManagerLocation senderLocation,
        int gateIndex,
        ResourceID localLocation) {

    final ShuffleDescriptor[] channelDescriptors = new ShuffleDescriptor[channels];
    for (int channelIndex = 0; channelIndex < channels; ++channelIndex) {
        channelDescriptors[channelIndex] = createShuffleDescriptor(
            localMode, partitionIds[gateIndex], localLocation, senderLocation, channelIndex);
    }

    return new InputGateDeploymentDescriptor(
        dataSetID,
        ResultPartitionType.PIPELINED_BOUNDED,
        // 0 is used because TestRemoteInputChannel and TestLocalInputChannel will
        // ignore this and use channelIndex instead when requesting a subpartition
        0,
        channelDescriptors);
}
Example #3
Source File: StreamNetworkBenchmarkEnvironment.java From flink with Apache License 2.0 | 6 votes |
private InputGate createInputGate(TaskManagerLocation senderLocation) throws Exception {
    IndexedInputGate[] gates = new IndexedInputGate[partitionIds.length];
    for (int gateIndex = 0; gateIndex < gates.length; ++gateIndex) {
        final InputGateDeploymentDescriptor gateDescriptor = createInputGateDeploymentDescriptor(
            senderLocation,
            gateIndex,
            location);

        final IndexedInputGate gate = createInputGateWithMetrics(gateFactory, gateDescriptor, gateIndex);
        gate.setup();
        gates[gateIndex] = gate;
    }

    if (gates.length > 1) {
        return new UnionInputGate(gates);
    } else {
        return gates[0];
    }
}
Example #4
Source File: StreamNetworkBenchmarkEnvironment.java From flink with Apache License 2.0 | 6 votes |
private InputGateDeploymentDescriptor createInputGateDeploymentDescriptor(
        TaskManagerLocation senderLocation,
        int consumedSubpartitionIndex,
        ResourceID localLocation) {

    final ShuffleDescriptor[] channelDescriptors = Arrays.stream(partitionIds)
        .map(partitionId ->
            createShuffleDescriptor(localMode, partitionId, localLocation, senderLocation, consumedSubpartitionIndex))
        .toArray(ShuffleDescriptor[]::new);

    return new InputGateDeploymentDescriptor(
        dataSetID,
        ResultPartitionType.PIPELINED_BOUNDED,
        consumedSubpartitionIndex,
        channelDescriptors);
}
Example #5
Source File: TaskExecutorSubmissionTest.java From flink with Apache License 2.0 | 6 votes |
private TaskDeploymentDescriptor createTestTaskDeploymentDescriptor(
        String taskName,
        ExecutionAttemptID eid,
        Class<? extends AbstractInvokable> abstractInvokable,
        int maxNumberOfSubtasks,
        List<ResultPartitionDeploymentDescriptor> producedPartitions,
        List<InputGateDeploymentDescriptor> inputGates) throws IOException {

    Preconditions.checkNotNull(producedPartitions);
    Preconditions.checkNotNull(inputGates);

    return createTaskDeploymentDescriptor(
        jobId,
        testName.getMethodName(),
        eid,
        new SerializedValue<>(new ExecutionConfig()),
        taskName,
        maxNumberOfSubtasks,
        0,
        1,
        0,
        new Configuration(),
        new Configuration(),
        abstractInvokable.getName(),
        producedPartitions,
        inputGates,
        Collections.emptyList(),
        Collections.emptyList(),
        0);
}
Example #6
Source File: TaskExecutorSubmissionTest.java From flink with Apache License 2.0 | 6 votes |
private TaskDeploymentDescriptor createTestTaskDeploymentDescriptor(
        String taskName,
        ExecutionAttemptID eid,
        Class<? extends AbstractInvokable> abstractInvokable,
        int maxNumberOfSubtasks,
        Collection<ResultPartitionDeploymentDescriptor> producedPartitions,
        Collection<InputGateDeploymentDescriptor> inputGates) throws IOException {

    Preconditions.checkNotNull(producedPartitions);
    Preconditions.checkNotNull(inputGates);

    return createTaskDeploymentDescriptor(
        jobId,
        testName.getMethodName(),
        eid,
        new SerializedValue<>(new ExecutionConfig()),
        taskName,
        maxNumberOfSubtasks,
        0,
        1,
        0,
        new Configuration(),
        new Configuration(),
        abstractInvokable.getName(),
        producedPartitions,
        inputGates,
        Collections.emptyList(),
        Collections.emptyList(),
        0);
}
Example #7
Source File: SingleInputGateFactory.java From flink with Apache License 2.0 | 5 votes |
/**
 * Creates an input gate and all of its input channels.
 */
public SingleInputGate create(
        @Nonnull String owningTaskName,
        @Nonnull InputGateDeploymentDescriptor igdd,
        @Nonnull PartitionProducerStateProvider partitionProducerStateProvider,
        @Nonnull InputChannelMetrics metrics) {

    SupplierWithException<BufferPool, IOException> bufferPoolFactory = createBufferPoolFactory(
        networkBufferPool,
        isCreditBased,
        networkBuffersPerChannel,
        floatingNetworkBuffersPerGate,
        igdd.getShuffleDescriptors().length,
        igdd.getConsumedPartitionType());

    SingleInputGate inputGate = new SingleInputGate(
        owningTaskName,
        igdd.getConsumedResultId(),
        igdd.getConsumedPartitionType(),
        igdd.getConsumedSubpartitionIndex(),
        igdd.getShuffleDescriptors().length,
        partitionProducerStateProvider,
        isCreditBased,
        bufferPoolFactory);

    createInputChannels(owningTaskName, igdd, inputGate, metrics);
    return inputGate;
}
Example #8
Source File: StreamNetworkBenchmarkEnvironment.java From flink with Apache License 2.0 | 5 votes |
private IndexedInputGate createInputGateWithMetrics(
        SingleInputGateFactory gateFactory,
        InputGateDeploymentDescriptor gateDescriptor,
        int gateIndex) {

    final SingleInputGate singleGate = gateFactory.create(
        "receiving task[" + gateIndex + "]",
        gateIndex,
        gateDescriptor,
        SingleInputGateBuilder.NO_OP_PRODUCER_CHECKER,
        InputChannelTestUtils.newUnregisteredInputChannelMetrics());

    return new InputGateWithMetrics(singleGate, new SimpleCounter());
}
Example #9
Source File: SingleInputGateFactory.java From flink with Apache License 2.0 | 5 votes |
private void createInputChannels(
        String owningTaskName,
        InputGateDeploymentDescriptor inputGateDeploymentDescriptor,
        SingleInputGate inputGate,
        InputChannelMetrics metrics) {

    ShuffleDescriptor[] shuffleDescriptors = inputGateDeploymentDescriptor.getShuffleDescriptors();

    // Create the input channels. There is one input channel for each consumed partition.
    InputChannel[] inputChannels = new InputChannel[shuffleDescriptors.length];

    ChannelStatistics channelStatistics = new ChannelStatistics();

    for (int i = 0; i < inputChannels.length; i++) {
        inputChannels[i] = createInputChannel(
            inputGate,
            i,
            shuffleDescriptors[i],
            channelStatistics,
            metrics);
        ResultPartitionID resultPartitionID = inputChannels[i].getPartitionId();
        inputGate.setInputChannel(resultPartitionID.getPartitionId(), inputChannels[i]);
    }

    LOG.debug("{}: Created {} input channels ({}).",
        owningTaskName,
        inputChannels.length,
        channelStatistics);
}
Example #10
Source File: TaskTest.java From flink with Apache License 2.0 | 5 votes |
@Test
public void testExecutionFailsInNetworkRegistrationForGates() throws Exception {
    final ShuffleDescriptor dummyChannel = NettyShuffleDescriptorBuilder.newBuilder().buildRemote();
    final InputGateDeploymentDescriptor dummyGate = new InputGateDeploymentDescriptor(
        new IntermediateDataSetID(),
        ResultPartitionType.PIPELINED,
        0,
        new ShuffleDescriptor[] { dummyChannel });
    testExecutionFailsInNetworkRegistration(Collections.emptyList(), Collections.singleton(dummyGate));
}
Example #11
Source File: TaskTest.java From flink with Apache License 2.0 | 5 votes |
private void testExecutionFailsInNetworkRegistration(
        Collection<ResultPartitionDeploymentDescriptor> resultPartitions,
        Collection<InputGateDeploymentDescriptor> inputGates) throws Exception {

    final String errorMessage = "Network buffer pool has already been destroyed.";

    final ResultPartitionConsumableNotifier consumableNotifier = new NoOpResultPartitionConsumableNotifier();
    final PartitionProducerStateChecker partitionProducerStateChecker = mock(PartitionProducerStateChecker.class);

    final QueuedNoOpTaskManagerActions taskManagerActions = new QueuedNoOpTaskManagerActions();
    final Task task = new TestTaskBuilder(shuffleEnvironment)
        .setTaskManagerActions(taskManagerActions)
        .setConsumableNotifier(consumableNotifier)
        .setPartitionProducerStateChecker(partitionProducerStateChecker)
        .setResultPartitions(resultPartitions)
        .setInputGates(inputGates)
        .build();

    // shut down the network to make the following task registration fail
    shuffleEnvironment.close();

    // should fail
    task.run();

    // verify final state
    assertEquals(ExecutionState.FAILED, task.getExecutionState());
    assertTrue(task.isCanceledOrFailed());
    assertTrue(task.getFailureCause().getMessage().contains(errorMessage));

    taskManagerActions.validateListenerMessage(
        ExecutionState.FAILED, task, new IllegalStateException(errorMessage));
}
Example #12
Source File: TaskExecutorSubmissionTest.java From flink with Apache License 2.0 | 5 votes |
private TaskDeploymentDescriptor createReceiver(NettyShuffleDescriptor shuffleDescriptor) throws IOException {
    InputGateDeploymentDescriptor inputGateDeploymentDescriptor =
        new InputGateDeploymentDescriptor(
            new IntermediateDataSetID(),
            ResultPartitionType.PIPELINED,
            0,
            new ShuffleDescriptor[] {shuffleDescriptor});

    return createTestTaskDeploymentDescriptor(
        "Receiver",
        new ExecutionAttemptID(),
        TestingAbstractInvokables.Receiver.class,
        1,
        Collections.emptyList(),
        Collections.singletonList(inputGateDeploymentDescriptor));
}
Example #13
Source File: SingleInputGateFactory.java From flink with Apache License 2.0 | 5 votes |
private void createInputChannels(
        String owningTaskName,
        InputGateDeploymentDescriptor inputGateDeploymentDescriptor,
        SingleInputGate inputGate,
        InputChannelMetrics metrics) {

    ShuffleDescriptor[] shuffleDescriptors = inputGateDeploymentDescriptor.getShuffleDescriptors();

    // Create the input channels. There is one input channel for each consumed partition.
    InputChannel[] inputChannels = new InputChannel[shuffleDescriptors.length];

    ChannelStatistics channelStatistics = new ChannelStatistics();

    for (int i = 0; i < inputChannels.length; i++) {
        inputChannels[i] = createInputChannel(
            inputGate,
            i,
            shuffleDescriptors[i],
            channelStatistics,
            metrics);
    }
    inputGate.setInputChannels(inputChannels);

    LOG.debug("{}: Created {} input channels ({}).",
        owningTaskName,
        inputChannels.length,
        channelStatistics);
}
Example #14
Source File: SingleInputGateFactory.java From flink with Apache License 2.0 | 5 votes |
/**
 * Creates an input gate and all of its input channels.
 */
public SingleInputGate create(
        @Nonnull String owningTaskName,
        int gateIndex,
        @Nonnull InputGateDeploymentDescriptor igdd,
        @Nonnull PartitionProducerStateProvider partitionProducerStateProvider,
        @Nonnull InputChannelMetrics metrics) {

    SupplierWithException<BufferPool, IOException> bufferPoolFactory = createBufferPoolFactory(
        networkBufferPool,
        networkBuffersPerChannel,
        floatingNetworkBuffersPerGate,
        igdd.getShuffleDescriptors().length,
        igdd.getConsumedPartitionType());

    BufferDecompressor bufferDecompressor = null;
    if (igdd.getConsumedPartitionType().isBlocking() && blockingShuffleCompressionEnabled) {
        bufferDecompressor = new BufferDecompressor(networkBufferSize, compressionCodec);
    }

    SingleInputGate inputGate = new SingleInputGate(
        owningTaskName,
        gateIndex,
        igdd.getConsumedResultId(),
        igdd.getConsumedPartitionType(),
        igdd.getConsumedSubpartitionIndex(),
        igdd.getShuffleDescriptors().length,
        partitionProducerStateProvider,
        bufferPoolFactory,
        bufferDecompressor,
        networkBufferPool);

    createInputChannels(owningTaskName, igdd, inputGate, metrics);
    return inputGate;
}
Example #15
Source File: NettyShuffleEnvironment.java From flink with Apache License 2.0 | 5 votes |
@Override
public List<SingleInputGate> createInputGates(
        ShuffleIOOwnerContext ownerContext,
        PartitionProducerStateProvider partitionProducerStateProvider,
        List<InputGateDeploymentDescriptor> inputGateDeploymentDescriptors) {

    synchronized (lock) {
        Preconditions.checkState(!isClosed, "The NettyShuffleEnvironment has already been shut down.");

        MetricGroup networkInputGroup = ownerContext.getInputGroup();
        @SuppressWarnings("deprecation")
        InputChannelMetrics inputChannelMetrics = new InputChannelMetrics(networkInputGroup, ownerContext.getParentGroup());

        SingleInputGate[] inputGates = new SingleInputGate[inputGateDeploymentDescriptors.size()];
        for (int gateIndex = 0; gateIndex < inputGates.length; gateIndex++) {
            final InputGateDeploymentDescriptor igdd = inputGateDeploymentDescriptors.get(gateIndex);
            SingleInputGate inputGate = singleInputGateFactory.create(
                ownerContext.getOwnerName(),
                gateIndex,
                igdd,
                partitionProducerStateProvider,
                inputChannelMetrics);
            InputGateID id = new InputGateID(igdd.getConsumedResultId(), ownerContext.getExecutionAttemptID());
            inputGatesById.put(id, inputGate);
            inputGate.getCloseFuture().thenRun(() -> inputGatesById.remove(id));
            inputGates[gateIndex] = inputGate;
        }

        registerInputMetrics(config.isNetworkDetailedMetrics(), networkInputGroup, inputGates);
        return Arrays.asList(inputGates);
    }
}
Example #16
Source File: StreamNetworkBenchmarkEnvironment.java From flink with Apache License 2.0 | 5 votes |
private InputGate createInputGateWithMetrics(
        SingleInputGateFactory gateFactory,
        InputGateDeploymentDescriptor gateDescriptor,
        int channelIndex) {

    final SingleInputGate singleGate = gateFactory.create(
        "receiving task[" + channelIndex + "]",
        gateDescriptor,
        SingleInputGateBuilder.NO_OP_PRODUCER_CHECKER,
        InputChannelTestUtils.newUnregisteredInputChannelMetrics());

    return new InputGateWithMetrics(singleGate, new SimpleCounter());
}
Example #17
Source File: SingleInputGateTest.java From flink with Apache License 2.0 | 5 votes |
private static Map<InputGateID, SingleInputGate> createInputGateWithLocalChannels(
        NettyShuffleEnvironment network,
        int numberOfGates,
        @SuppressWarnings("SameParameterValue") int numberOfLocalChannels) {

    ShuffleDescriptor[] channelDescs = new NettyShuffleDescriptor[numberOfLocalChannels];
    for (int i = 0; i < numberOfLocalChannels; i++) {
        channelDescs[i] = createRemoteWithIdAndLocation(new IntermediateResultPartitionID(), ResourceID.generate());
    }

    InputGateDeploymentDescriptor[] gateDescs = new InputGateDeploymentDescriptor[numberOfGates];
    IntermediateDataSetID[] ids = new IntermediateDataSetID[numberOfGates];
    for (int i = 0; i < numberOfGates; i++) {
        ids[i] = new IntermediateDataSetID();
        gateDescs[i] = new InputGateDeploymentDescriptor(
            ids[i],
            ResultPartitionType.PIPELINED,
            0,
            channelDescs);
    }

    ExecutionAttemptID consumerID = new ExecutionAttemptID();
    SingleInputGate[] gates = network.createInputGates(
        network.createShuffleIOOwnerContext("", consumerID, new UnregisteredMetricsGroup()),
        SingleInputGateBuilder.NO_OP_PRODUCER_CHECKER,
        Arrays.asList(gateDescs)).toArray(new SingleInputGate[] {});

    Map<InputGateID, SingleInputGate> inputGatesById = new HashMap<>();
    for (int i = 0; i < numberOfGates; i++) {
        inputGatesById.put(new InputGateID(ids[i], consumerID), gates[i]);
    }

    return inputGatesById;
}
Example #18
Source File: NettyShuffleEnvironment.java From flink with Apache License 2.0 | 5 votes |
@Override
public Collection<SingleInputGate> createInputGates(
        ShuffleIOOwnerContext ownerContext,
        PartitionProducerStateProvider partitionProducerStateProvider,
        Collection<InputGateDeploymentDescriptor> inputGateDeploymentDescriptors) {

    synchronized (lock) {
        Preconditions.checkState(!isClosed, "The NettyShuffleEnvironment has already been shut down.");

        MetricGroup networkInputGroup = ownerContext.getInputGroup();
        @SuppressWarnings("deprecation")
        InputChannelMetrics inputChannelMetrics = new InputChannelMetrics(networkInputGroup, ownerContext.getParentGroup());

        SingleInputGate[] inputGates = new SingleInputGate[inputGateDeploymentDescriptors.size()];
        int counter = 0;
        for (InputGateDeploymentDescriptor igdd : inputGateDeploymentDescriptors) {
            SingleInputGate inputGate = singleInputGateFactory.create(
                ownerContext.getOwnerName(),
                igdd,
                partitionProducerStateProvider,
                inputChannelMetrics);
            InputGateID id = new InputGateID(igdd.getConsumedResultId(), ownerContext.getExecutionAttemptID());
            inputGatesById.put(id, inputGate);
            inputGate.getCloseFuture().thenRun(() -> inputGatesById.remove(id));
            inputGates[counter++] = inputGate;
        }

        registerInputMetrics(config.isNetworkDetailedMetrics(), config.isCreditBased(), networkInputGroup, inputGates);
        return Arrays.asList(inputGates);
    }
}
Example #19
Source File: TaskTest.java From flink with Apache License 2.0 | 5 votes |
@Test
public void testExecutionFailsInNetworkRegistrationForGates() throws Exception {
    final ShuffleDescriptor dummyChannel = NettyShuffleDescriptorBuilder.newBuilder().buildRemote();
    final InputGateDeploymentDescriptor dummyGate = new InputGateDeploymentDescriptor(
        new IntermediateDataSetID(),
        ResultPartitionType.PIPELINED,
        0,
        new ShuffleDescriptor[] { dummyChannel });
    testExecutionFailsInNetworkRegistration(Collections.emptyList(), Collections.singletonList(dummyGate));
}
Example #20
Source File: TaskTest.java From flink with Apache License 2.0 | 5 votes |
private void testExecutionFailsInNetworkRegistration(
        List<ResultPartitionDeploymentDescriptor> resultPartitions,
        List<InputGateDeploymentDescriptor> inputGates) throws Exception {

    final String errorMessage = "Network buffer pool has already been destroyed.";

    final ResultPartitionConsumableNotifier consumableNotifier = new NoOpResultPartitionConsumableNotifier();
    final PartitionProducerStateChecker partitionProducerStateChecker = mock(PartitionProducerStateChecker.class);

    final QueuedNoOpTaskManagerActions taskManagerActions = new QueuedNoOpTaskManagerActions();
    final Task task = new TestTaskBuilder(shuffleEnvironment)
        .setTaskManagerActions(taskManagerActions)
        .setConsumableNotifier(consumableNotifier)
        .setPartitionProducerStateChecker(partitionProducerStateChecker)
        .setResultPartitions(resultPartitions)
        .setInputGates(inputGates)
        .build();

    // shut down the network to make the following task registration fail
    shuffleEnvironment.close();

    // should fail
    task.run();

    // verify final state
    assertEquals(ExecutionState.FAILED, task.getExecutionState());
    assertTrue(task.isCanceledOrFailed());
    assertTrue(task.getFailureCause().getMessage().contains(errorMessage));

    taskManagerActions.validateListenerMessage(
        ExecutionState.FAILED, task, new IllegalStateException(errorMessage));
}
Example #21
Source File: TaskExecutorSubmissionTest.java From flink with Apache License 2.0 | 5 votes |
private TaskDeploymentDescriptor createReceiver(NettyShuffleDescriptor shuffleDescriptor) throws IOException {
    InputGateDeploymentDescriptor inputGateDeploymentDescriptor =
        new InputGateDeploymentDescriptor(
            new IntermediateDataSetID(),
            ResultPartitionType.PIPELINED,
            0,
            new ShuffleDescriptor[] {shuffleDescriptor});

    return createTestTaskDeploymentDescriptor(
        "Receiver",
        new ExecutionAttemptID(),
        TestingAbstractInvokables.Receiver.class,
        1,
        Collections.emptyList(),
        Collections.singletonList(inputGateDeploymentDescriptor));
}
Example #22
Source File: SingleInputGateTest.java From flink with Apache License 2.0 | 5 votes |
private static Map<InputGateID, SingleInputGate> createInputGateWithLocalChannels(
        NettyShuffleEnvironment network,
        int numberOfGates,
        @SuppressWarnings("SameParameterValue") int numberOfLocalChannels) {

    ShuffleDescriptor[] channelDescs = new NettyShuffleDescriptor[numberOfLocalChannels];
    for (int i = 0; i < numberOfLocalChannels; i++) {
        channelDescs[i] = createRemoteWithIdAndLocation(new IntermediateResultPartitionID(), ResourceID.generate());
    }

    InputGateDeploymentDescriptor[] gateDescs = new InputGateDeploymentDescriptor[numberOfGates];
    IntermediateDataSetID[] ids = new IntermediateDataSetID[numberOfGates];
    for (int i = 0; i < numberOfGates; i++) {
        ids[i] = new IntermediateDataSetID();
        gateDescs[i] = new InputGateDeploymentDescriptor(
            ids[i],
            ResultPartitionType.PIPELINED,
            0,
            channelDescs);
    }

    ExecutionAttemptID consumerID = new ExecutionAttemptID();
    SingleInputGate[] gates = network.createInputGates(
        network.createShuffleIOOwnerContext("", consumerID, new UnregisteredMetricsGroup()),
        SingleInputGateBuilder.NO_OP_PRODUCER_CHECKER,
        asList(gateDescs)).toArray(new SingleInputGate[] {});

    Map<InputGateID, SingleInputGate> inputGatesById = new HashMap<>();
    for (int i = 0; i < numberOfGates; i++) {
        inputGatesById.put(new InputGateID(ids[i], consumerID), gates[i]);
    }

    return inputGatesById;
}
Example #23
Source File: TaskAsyncCallTest.java From flink with Apache License 2.0 | 4 votes |
private Task createTask(Class<? extends AbstractInvokable> invokableClass) throws Exception {
    final TestingClassLoaderLease classLoaderHandle = TestingClassLoaderLease.newBuilder()
        .setGetOrResolveClassLoaderFunction((permanentBlobKeys, urls) -> new TestUserCodeClassLoader())
        .build();

    ResultPartitionConsumableNotifier consumableNotifier = new NoOpResultPartitionConsumableNotifier();
    PartitionProducerStateChecker partitionProducerStateChecker = mock(PartitionProducerStateChecker.class);
    Executor executor = mock(Executor.class);
    TaskMetricGroup taskMetricGroup = UnregisteredMetricGroups.createUnregisteredTaskMetricGroup();

    JobInformation jobInformation = new JobInformation(
        new JobID(),
        "Job Name",
        new SerializedValue<>(new ExecutionConfig()),
        new Configuration(),
        Collections.emptyList(),
        Collections.emptyList());

    TaskInformation taskInformation = new TaskInformation(
        new JobVertexID(),
        "Test Task",
        1,
        1,
        invokableClass.getName(),
        new Configuration());

    return new Task(
        jobInformation,
        taskInformation,
        new ExecutionAttemptID(),
        new AllocationID(),
        0,
        0,
        Collections.<ResultPartitionDeploymentDescriptor>emptyList(),
        Collections.<InputGateDeploymentDescriptor>emptyList(),
        0,
        mock(MemoryManager.class),
        mock(IOManager.class),
        shuffleEnvironment,
        new KvStateService(new KvStateRegistry(), null, null),
        mock(BroadcastVariableManager.class),
        new TaskEventDispatcher(),
        ExternalResourceInfoProvider.NO_EXTERNAL_RESOURCES,
        new TestTaskStateManager(),
        mock(TaskManagerActions.class),
        mock(InputSplitProvider.class),
        mock(CheckpointResponder.class),
        new NoOpTaskOperatorEventGateway(),
        new TestGlobalAggregateManager(),
        classLoaderHandle,
        mock(FileCache.class),
        new TestingTaskManagerRuntimeInfo(),
        taskMetricGroup,
        consumableNotifier,
        partitionProducerStateChecker,
        executor);
}
Example #24
Source File: TestTaskBuilder.java From flink with Apache License 2.0 | 4 votes |
public TestTaskBuilder setInputGates(List<InputGateDeploymentDescriptor> inputGates) {
    this.inputGates = inputGates;
    return this;
}
Example #25
Source File: TaskExecutorSubmissionTest.java From flink with Apache License 2.0 | 4 votes |
static TaskDeploymentDescriptor createTaskDeploymentDescriptor(
        JobID jobId,
        String jobName,
        ExecutionAttemptID executionAttemptId,
        SerializedValue<ExecutionConfig> serializedExecutionConfig,
        String taskName,
        int maxNumberOfSubtasks,
        int subtaskIndex,
        int numberOfSubtasks,
        int attemptNumber,
        Configuration jobConfiguration,
        Configuration taskConfiguration,
        String invokableClassName,
        List<ResultPartitionDeploymentDescriptor> producedPartitions,
        List<InputGateDeploymentDescriptor> inputGates,
        Collection<PermanentBlobKey> requiredJarFiles,
        Collection<URL> requiredClasspaths,
        int targetSlotNumber) throws IOException {

    JobInformation jobInformation = new JobInformation(
        jobId,
        jobName,
        serializedExecutionConfig,
        jobConfiguration,
        requiredJarFiles,
        requiredClasspaths);

    TaskInformation taskInformation = new TaskInformation(
        new JobVertexID(),
        taskName,
        numberOfSubtasks,
        maxNumberOfSubtasks,
        invokableClassName,
        taskConfiguration);

    SerializedValue<JobInformation> serializedJobInformation = new SerializedValue<>(jobInformation);
    SerializedValue<TaskInformation> serializedJobVertexInformation = new SerializedValue<>(taskInformation);

    return new TaskDeploymentDescriptor(
        jobId,
        new TaskDeploymentDescriptor.NonOffloaded<>(serializedJobInformation),
        new TaskDeploymentDescriptor.NonOffloaded<>(serializedJobVertexInformation),
        executionAttemptId,
        new AllocationID(),
        subtaskIndex,
        attemptNumber,
        targetSlotNumber,
        null,
        producedPartitions,
        inputGates);
}
Example #26
Source File: JvmExitOnFatalErrorTest.java From flink with Apache License 2.0 | 4 votes |
public static void main(String[] args) throws Exception {
    System.err.println("creating task");

    // we suppress process exits via errors here to not
    // have a test that exits accidentally due to a programming error
    try {
        final Configuration taskManagerConfig = new Configuration();
        taskManagerConfig.setBoolean(TaskManagerOptions.KILL_ON_OUT_OF_MEMORY, true);

        final JobID jid = new JobID();
        final AllocationID allocationID = new AllocationID();
        final JobVertexID jobVertexId = new JobVertexID();
        final ExecutionAttemptID executionAttemptID = new ExecutionAttemptID();
        final AllocationID slotAllocationId = new AllocationID();

        final SerializedValue<ExecutionConfig> execConfig = new SerializedValue<>(new ExecutionConfig());

        final JobInformation jobInformation = new JobInformation(
            jid, "Test Job", execConfig, new Configuration(),
            Collections.emptyList(), Collections.emptyList());

        final TaskInformation taskInformation = new TaskInformation(
            jobVertexId, "Test Task", 1, 1, OomInvokable.class.getName(), new Configuration());

        final MemoryManager memoryManager = MemoryManagerBuilder.newBuilder().setMemorySize(1024 * 1024).build();
        final IOManager ioManager = new IOManagerAsync();

        final ShuffleEnvironment<?, ?> shuffleEnvironment = new NettyShuffleEnvironmentBuilder().build();

        final Configuration copiedConf = new Configuration(taskManagerConfig);
        final TaskManagerRuntimeInfo tmInfo = TaskManagerConfiguration.fromConfiguration(
            taskManagerConfig,
            TaskExecutorResourceUtils.resourceSpecFromConfigForLocalExecution(copiedConf),
            InetAddress.getLoopbackAddress().getHostAddress());

        final Executor executor = Executors.newCachedThreadPool();

        final TaskLocalStateStore localStateStore = new TaskLocalStateStoreImpl(
            jid,
            allocationID,
            jobVertexId,
            0,
            TestLocalRecoveryConfig.disabled(),
            executor);

        final TaskStateManager slotStateManager = new TaskStateManagerImpl(
            jid,
            executionAttemptID,
            localStateStore,
            null,
            mock(CheckpointResponder.class));

        Task task = new Task(
            jobInformation,
            taskInformation,
            executionAttemptID,
            slotAllocationId,
            0,       // subtaskIndex
            0,       // attemptNumber
            Collections.<ResultPartitionDeploymentDescriptor>emptyList(),
            Collections.<InputGateDeploymentDescriptor>emptyList(),
            0,       // targetSlotNumber
            memoryManager,
            ioManager,
            shuffleEnvironment,
            new KvStateService(new KvStateRegistry(), null, null),
            new BroadcastVariableManager(),
            new TaskEventDispatcher(),
            ExternalResourceInfoProvider.NO_EXTERNAL_RESOURCES,
            slotStateManager,
            new NoOpTaskManagerActions(),
            new NoOpInputSplitProvider(),
            NoOpCheckpointResponder.INSTANCE,
            new NoOpTaskOperatorEventGateway(),
            new TestGlobalAggregateManager(),
            TestingClassLoaderLease.newBuilder().build(),
            new FileCache(tmInfo.getTmpDirectories(), VoidPermanentBlobService.INSTANCE),
            tmInfo,
            UnregisteredMetricGroups.createUnregisteredTaskMetricGroup(),
            new NoOpResultPartitionConsumableNotifier(),
            new NoOpPartitionProducerStateChecker(),
            executor);

        System.err.println("starting task thread");

        task.startTaskThread();
    } catch (Throwable t) {
        System.err.println("ERROR STARTING TASK");
        t.printStackTrace();
    }

    System.err.println("parking the main thread");
    CommonTestUtils.blockForeverNonInterruptibly();
}
Example #27
Source File: TaskExecutorPartitionTrackerImplTest.java From flink with Apache License 2.0 | 4 votes |
@Override
public List<SingleInputGate> createInputGates(
        ShuffleIOOwnerContext ownerContext,
        PartitionProducerStateProvider partitionProducerStateProvider,
        List<InputGateDeploymentDescriptor> inputGateDeploymentDescriptors) {
    return backingShuffleEnvironment.createInputGates(
        ownerContext, partitionProducerStateProvider, inputGateDeploymentDescriptors);
}
Example #28
Source File: SingleInputGateTest.java From flink with Apache License 2.0 | 4 votes |
/**
 * Tests that the request backoff configuration is correctly forwarded to the channels.
 */
@Test
public void testRequestBackoffConfiguration() throws Exception {
    IntermediateResultPartitionID[] partitionIds = new IntermediateResultPartitionID[] {
        new IntermediateResultPartitionID(),
        new IntermediateResultPartitionID(),
        new IntermediateResultPartitionID()
    };

    ResourceID localLocation = ResourceID.generate();
    ShuffleDescriptor[] channelDescs = new ShuffleDescriptor[]{
        // Local
        createRemoteWithIdAndLocation(partitionIds[0], localLocation),
        // Remote
        createRemoteWithIdAndLocation(partitionIds[1], ResourceID.generate()),
        // Unknown
        new UnknownShuffleDescriptor(new ResultPartitionID(partitionIds[2], new ExecutionAttemptID()))};

    InputGateDeploymentDescriptor gateDesc = new InputGateDeploymentDescriptor(
        new IntermediateDataSetID(),
        ResultPartitionType.PIPELINED,
        0,
        channelDescs);

    int initialBackoff = 137;
    int maxBackoff = 1001;

    final NettyShuffleEnvironment netEnv = new NettyShuffleEnvironmentBuilder()
        .setPartitionRequestInitialBackoff(initialBackoff)
        .setPartitionRequestMaxBackoff(maxBackoff)
        .build();

    SingleInputGate gate = new SingleInputGateFactory(
        localLocation,
        netEnv.getConfiguration(),
        netEnv.getConnectionManager(),
        netEnv.getResultPartitionManager(),
        new TaskEventDispatcher(),
        netEnv.getNetworkBufferPool())
        .create(
            "TestTask",
            0,
            gateDesc,
            SingleInputGateBuilder.NO_OP_PRODUCER_CHECKER,
            InputChannelTestUtils.newUnregisteredInputChannelMetrics());

    gate.convertRecoveredInputChannels();

    try {
        assertEquals(gateDesc.getConsumedPartitionType(), gate.getConsumedPartitionType());

        Map<IntermediateResultPartitionID, InputChannel> channelMap = gate.getInputChannels();

        assertEquals(3, channelMap.size());
        InputChannel localChannel = channelMap.get(partitionIds[0]);
        assertEquals(LocalInputChannel.class, localChannel.getClass());

        InputChannel remoteChannel = channelMap.get(partitionIds[1]);
        assertEquals(RemoteInputChannel.class, remoteChannel.getClass());

        InputChannel unknownChannel = channelMap.get(partitionIds[2]);
        assertEquals(UnknownInputChannel.class, unknownChannel.getClass());

        InputChannel[] channels = new InputChannel[] {localChannel, remoteChannel, unknownChannel};
        for (InputChannel ch : channels) {
            assertEquals(0, ch.getCurrentBackoff());

            assertTrue(ch.increaseBackoff());
            assertEquals(initialBackoff, ch.getCurrentBackoff());

            assertTrue(ch.increaseBackoff());
            assertEquals(initialBackoff * 2, ch.getCurrentBackoff());

            assertTrue(ch.increaseBackoff());
            assertEquals(initialBackoff * 2 * 2, ch.getCurrentBackoff());

            assertTrue(ch.increaseBackoff());
            assertEquals(maxBackoff, ch.getCurrentBackoff());

            assertFalse(ch.increaseBackoff());
        }
    } finally {
        gate.close();
        netEnv.close();
    }
}
Example #29
Source File: StreamTaskTerminationTest.java From flink with Apache License 2.0 | 4 votes |
/**
 * FLINK-6833
 *
 * <p>Tests that a finished stream task cannot be failed by an asynchronous checkpointing operation after
 * the stream task has stopped running.
 */
@Test
public void testConcurrentAsyncCheckpointCannotFailFinishedStreamTask() throws Exception {
    final Configuration taskConfiguration = new Configuration();
    final StreamConfig streamConfig = new StreamConfig(taskConfiguration);
    final NoOpStreamOperator<Long> noOpStreamOperator = new NoOpStreamOperator<>();

    final StateBackend blockingStateBackend = new BlockingStateBackend();

    streamConfig.setStreamOperator(noOpStreamOperator);
    streamConfig.setOperatorID(new OperatorID());
    streamConfig.setStateBackend(blockingStateBackend);

    final long checkpointId = 0L;
    final long checkpointTimestamp = 0L;

    final JobInformation jobInformation = new JobInformation(
        new JobID(),
        "Test Job",
        new SerializedValue<>(new ExecutionConfig()),
        new Configuration(),
        Collections.emptyList(),
        Collections.emptyList());

    final TaskInformation taskInformation = new TaskInformation(
        new JobVertexID(),
        "Test Task",
        1,
        1,
        BlockingStreamTask.class.getName(),
        taskConfiguration);

    final TaskManagerRuntimeInfo taskManagerRuntimeInfo = new TestingTaskManagerRuntimeInfo();

    final ShuffleEnvironment<?, ?> shuffleEnvironment = new NettyShuffleEnvironmentBuilder().build();

    final Task task = new Task(
        jobInformation,
        taskInformation,
        new ExecutionAttemptID(),
        new AllocationID(),
        0,
        0,
        Collections.<ResultPartitionDeploymentDescriptor>emptyList(),
        Collections.<InputGateDeploymentDescriptor>emptyList(),
        0,
        MemoryManagerBuilder.newBuilder().setMemorySize(32L * 1024L).build(),
        new IOManagerAsync(),
        shuffleEnvironment,
        new KvStateService(new KvStateRegistry(), null, null),
        mock(BroadcastVariableManager.class),
        new TaskEventDispatcher(),
        ExternalResourceInfoProvider.NO_EXTERNAL_RESOURCES,
        new TestTaskStateManager(),
        mock(TaskManagerActions.class),
        mock(InputSplitProvider.class),
        mock(CheckpointResponder.class),
        new NoOpTaskOperatorEventGateway(),
        new TestGlobalAggregateManager(),
        TestingClassLoaderLease.newBuilder().build(),
        mock(FileCache.class),
        taskManagerRuntimeInfo,
        UnregisteredMetricGroups.createUnregisteredTaskMetricGroup(),
        new NoOpResultPartitionConsumableNotifier(),
        mock(PartitionProducerStateChecker.class),
        Executors.directExecutor());

    CompletableFuture<Void> taskRun = CompletableFuture.runAsync(
        () -> task.run(),
        TestingUtils.defaultExecutor());

    // wait until the stream task started running
    RUN_LATCH.await();

    // trigger a checkpoint
    task.triggerCheckpointBarrier(checkpointId, checkpointTimestamp, CheckpointOptions.forCheckpointWithDefaultLocation(), false);

    // wait until the task has completed execution
    taskRun.get();

    // check that no failure occurred
    if (task.getFailureCause() != null) {
        throw new Exception("Task failed", task.getFailureCause());
    }

    // check that we have entered the finished state
    assertEquals(ExecutionState.FINISHED, task.getExecutionState());
}
Example #30
Source File: SynchronousCheckpointITCase.java From flink with Apache License 2.0 | 4 votes |
private Task createTask(Class<? extends AbstractInvokable> invokableClass) throws Exception {
    ResultPartitionConsumableNotifier consumableNotifier = new NoOpResultPartitionConsumableNotifier();
    PartitionProducerStateChecker partitionProducerStateChecker = mock(PartitionProducerStateChecker.class);
    Executor executor = mock(Executor.class);
    ShuffleEnvironment<?, ?> shuffleEnvironment = new NettyShuffleEnvironmentBuilder().build();
    TaskMetricGroup taskMetricGroup = UnregisteredMetricGroups.createUnregisteredTaskMetricGroup();

    JobInformation jobInformation = new JobInformation(
        new JobID(),
        "Job Name",
        new SerializedValue<>(new ExecutionConfig()),
        new Configuration(),
        Collections.emptyList(),
        Collections.emptyList());

    TaskInformation taskInformation = new TaskInformation(
        new JobVertexID(),
        "Test Task",
        1,
        1,
        invokableClass.getName(),
        new Configuration());

    return new Task(
        jobInformation,
        taskInformation,
        new ExecutionAttemptID(),
        new AllocationID(),
        0,
        0,
        Collections.<ResultPartitionDeploymentDescriptor>emptyList(),
        Collections.<InputGateDeploymentDescriptor>emptyList(),
        0,
        mock(MemoryManager.class),
        mock(IOManager.class),
        shuffleEnvironment,
        new KvStateService(new KvStateRegistry(), null, null),
        mock(BroadcastVariableManager.class),
        new TaskEventDispatcher(),
        ExternalResourceInfoProvider.NO_EXTERNAL_RESOURCES,
        new TestTaskStateManager(),
        mock(TaskManagerActions.class),
        mock(InputSplitProvider.class),
        mock(CheckpointResponder.class),
        new NoOpTaskOperatorEventGateway(),
        new TestGlobalAggregateManager(),
        TestingClassLoaderLease.newBuilder().build(),
        mock(FileCache.class),
        new TestingTaskManagerRuntimeInfo(),
        taskMetricGroup,
        consumableNotifier,
        partitionProducerStateChecker,
        executor);
}