org.apache.flink.runtime.jobgraph.OperatorID Java Examples
The following examples show how to use
org.apache.flink.runtime.jobgraph.OperatorID.
The examples are extracted from open-source projects; the source file, originating project, and license are noted above each example.
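Before diving into the examples, here is a minimal sketch of the three ways an OperatorID is obtained in the snippets below. The wrapper class is purely illustrative; the constructors and the fromJobVertexID factory appear verbatim in the examples, and OperatorID is a 16-byte identifier in the style of Flink's AbstractID.

import org.apache.flink.runtime.jobgraph.JobVertexID;
import org.apache.flink.runtime.jobgraph.OperatorID;

class OperatorIdSketch { // hypothetical wrapper, for illustration only
    static void sketch() {
        // Randomly generated ID, as used in most tests below.
        OperatorID random = new OperatorID();

        // Deterministic ID built from two longs; useful when a test must
        // produce the same ID across snapshot and restore runs.
        OperatorID fixed = new OperatorID(42L, 42L);

        // ID derived from a JobVertexID, used when a task vertex hosts a
        // single operator.
        OperatorID fromVertex = OperatorID.fromJobVertexID(new JobVertexID());
    }
}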
Example #1
Source File: TaskStateSnapshotTest.java From flink with Apache License 2.0
@Test
public void discardState() throws Exception {
    TaskStateSnapshot taskStateSnapshot = new TaskStateSnapshot();

    OperatorID operatorID_1 = new OperatorID();
    OperatorID operatorID_2 = new OperatorID();

    OperatorSubtaskState operatorSubtaskState_1 = mock(OperatorSubtaskState.class);
    OperatorSubtaskState operatorSubtaskState_2 = mock(OperatorSubtaskState.class);

    taskStateSnapshot.putSubtaskStateByOperatorID(operatorID_1, operatorSubtaskState_1);
    taskStateSnapshot.putSubtaskStateByOperatorID(operatorID_2, operatorSubtaskState_2);

    taskStateSnapshot.discardState();
    verify(operatorSubtaskState_1).discardState();
    verify(operatorSubtaskState_2).discardState();
}
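Here the two subtask states are registered under freshly generated OperatorIDs; the test then verifies that discarding the TaskStateSnapshot forwards discardState() to every OperatorSubtaskState it holds.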
Example #2
Source File: StatefulOperatorChainedTaskTest.java From Flink-CEPplus with Apache License 2.0
@Test
public void testMultipleStatefulOperatorChainedSnapshotAndRestore() throws Exception {
    OperatorID headOperatorID = new OperatorID(42L, 42L);
    OperatorID tailOperatorID = new OperatorID(44L, 44L);

    JobManagerTaskRestore restore = createRunAndCheckpointOperatorChain(
        headOperatorID,
        new CounterOperator("head"),
        tailOperatorID,
        new CounterOperator("tail"),
        Optional.empty());

    TaskStateSnapshot stateHandles = restore.getTaskStateSnapshot();

    assertEquals(2, stateHandles.getSubtaskStateMappings().size());

    createRunAndCheckpointOperatorChain(
        headOperatorID,
        new CounterOperator("head"),
        tailOperatorID,
        new CounterOperator("tail"),
        Optional.of(restore));

    assertEquals(new HashSet<>(Arrays.asList(headOperatorID, tailOperatorID)), RESTORED_OPERATORS);
}
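Note the fixed IDs (42L, 42L) and (44L, 44L): because the IDs are deterministic rather than random, the second run of the chain can match the restored state back to the head and tail operators, which the final assertion on RESTORED_OPERATORS checks.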
Example #3
Source File: TestJobClient.java From flink with Apache License 2.0
@Override
public CompletableFuture<CoordinationResponse> sendCoordinationRequest(OperatorID operatorId, CoordinationRequest request) {
    if (jobStatus.isGloballyTerminalState()) {
        throw new RuntimeException("Job terminated");
    }

    Assert.assertEquals(this.operatorId, operatorId);
    CoordinationResponse response;
    try {
        response = handler.handleCoordinationRequest(request).get();
    } catch (Exception e) {
        throw new RuntimeException(e);
    }

    if (infoProvider.isJobFinished()) {
        jobStatus = JobStatus.FINISHED;
        jobExecutionResult = new JobExecutionResult(jobId, 0, infoProvider.getAccumulatorResults());
    }

    return CompletableFuture.completedFuture(response);
}
Example #4
Source File: AbstractUdfStreamOperatorLifecycleTest.java From flink with Apache License 2.0
@Test
public void testLifeCycleFull() throws Exception {
    ACTUAL_ORDER_TRACKING.clear();

    Configuration taskManagerConfig = new Configuration();
    StreamConfig cfg = new StreamConfig(new Configuration());
    MockSourceFunction srcFun = new MockSourceFunction();

    cfg.setStreamOperator(new LifecycleTrackingStreamSource<>(srcFun, true));
    cfg.setOperatorID(new OperatorID());
    cfg.setTimeCharacteristic(TimeCharacteristic.ProcessingTime);

    Task task = StreamTaskTest.createTask(SourceStreamTask.class, cfg, taskManagerConfig);

    task.startTaskThread();

    LifecycleTrackingStreamSource.runStarted.await();

    // wait for clean termination
    task.getExecutingThread().join();
    assertEquals(ExecutionState.FINISHED, task.getExecutionState());
    assertEquals(EXPECTED_CALL_ORDER_FULL, ACTUAL_ORDER_TRACKING);
}
Example #5
Source File: CheckpointMetadataLoadingTest.java From flink with Apache License 2.0
/**
 * Tests that savepoint loading fails when there is a max-parallelism mismatch.
 */
@Test
public void testMaxParallelismMismatch() throws Exception {
    final OperatorID operatorId = new OperatorID();
    final int parallelism = 128128;

    final CompletedCheckpointStorageLocation testSavepoint =
        createSavepointWithOperatorSubtaskState(242L, operatorId, parallelism);
    final Map<JobVertexID, ExecutionJobVertex> tasks =
        createTasks(operatorId, parallelism, parallelism + 1);

    try {
        Checkpoints.loadAndValidateCheckpoint(new JobID(), tasks, testSavepoint, cl, false);
        fail("Did not throw expected Exception");
    } catch (IllegalStateException expected) {
        assertTrue(expected.getMessage().contains("Max parallelism mismatch"));
    }
}
Example #6
Source File: RestoreStreamTaskTest.java From flink with Apache License 2.0
@Test
public void testRestoreWithoutState() throws Exception {
    OperatorID headOperatorID = new OperatorID(42L, 42L);
    OperatorID tailOperatorID = new OperatorID(44L, 44L);

    JobManagerTaskRestore restore = createRunAndCheckpointOperatorChain(
        headOperatorID,
        new StatelessOperator(),
        tailOperatorID,
        new CounterOperator(),
        Optional.empty());

    TaskStateSnapshot stateHandles = restore.getTaskStateSnapshot();

    assertEquals(2, stateHandles.getSubtaskStateMappings().size());

    createRunAndCheckpointOperatorChain(
        headOperatorID,
        new StatelessOperator(),
        tailOperatorID,
        new CounterOperator(),
        Optional.of(restore));

    assertEquals(new HashSet<>(Arrays.asList(headOperatorID, tailOperatorID)), RESTORED_OPERATORS);
}
Example #7
Source File: CheckpointCoordinatorTest.java From flink with Apache License 2.0
static TaskStateSnapshot mockSubtaskState(
        JobVertexID jobVertexID,
        int index,
        KeyGroupRange keyGroupRange) throws IOException {

    OperatorStateHandle partitionableState = generatePartitionableStateHandle(jobVertexID, index, 2, 8, false);
    KeyGroupsStateHandle partitionedKeyGroupState = generateKeyGroupState(jobVertexID, keyGroupRange, false);

    TaskStateSnapshot subtaskStates = spy(new TaskStateSnapshot());
    OperatorSubtaskState subtaskState = spy(new OperatorSubtaskState(
        partitionableState, null, partitionedKeyGroupState, null)
    );

    subtaskStates.putSubtaskStateByOperatorID(OperatorID.fromJobVertexID(jobVertexID), subtaskState);

    return subtaskStates;
}
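OperatorID.fromJobVertexID derives the operator's ID from the vertex ID, which is how Flink keys state for vertices that host a single operator; the mocked subtask state is registered under that derived ID.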
Example #8
Source File: AbstractUdfStreamOperatorLifecycleTest.java From flink with Apache License 2.0
@Test
public void testLifeCycleFull() throws Exception {
    ACTUAL_ORDER_TRACKING.clear();

    Configuration taskManagerConfig = new Configuration();
    StreamConfig cfg = new StreamConfig(new Configuration());
    MockSourceFunction srcFun = new MockSourceFunction();

    cfg.setStreamOperator(new LifecycleTrackingStreamSource<>(srcFun, true));
    cfg.setOperatorID(new OperatorID());
    cfg.setTimeCharacteristic(TimeCharacteristic.ProcessingTime);

    try (ShuffleEnvironment shuffleEnvironment = new NettyShuffleEnvironmentBuilder().build()) {
        Task task = StreamTaskTest.createTask(SourceStreamTask.class, shuffleEnvironment, cfg, taskManagerConfig);

        task.startTaskThread();

        LifecycleTrackingStreamSource.runStarted.await();

        // wait for clean termination
        task.getExecutingThread().join();
        assertEquals(ExecutionState.FINISHED, task.getExecutionState());
        assertEquals(EXPECTED_CALL_ORDER_FULL, ACTUAL_ORDER_TRACKING);
    }
}
Example #9
Source File: JobMasterTriggerSavepointITCase.java From flink with Apache License 2.0
@Override
public boolean triggerCheckpoint(
        final CheckpointMetaData checkpointMetaData,
        final CheckpointOptions checkpointOptions,
        final boolean advanceToEndOfEventTime) {

    final TaskStateSnapshot checkpointStateHandles = new TaskStateSnapshot();
    checkpointStateHandles.putSubtaskStateByOperatorID(
        OperatorID.fromJobVertexID(getEnvironment().getJobVertexId()),
        new OperatorSubtaskState());

    getEnvironment().acknowledgeCheckpoint(
        checkpointMetaData.getCheckpointId(),
        new CheckpointMetrics(),
        checkpointStateHandles);

    triggerCheckpointLatch.countDown();

    return true;
}
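Even though the OperatorSubtaskState here is empty, the acknowledgment still keys it by the OperatorID derived from the task's job vertex, mirroring the mapping the checkpoint coordinator expects on the other side.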
Example #10
Source File: CompletedCheckpointStoreTest.java From flink with Apache License 2.0
public static TestCompletedCheckpoint createCheckpoint(
        int id,
        SharedStateRegistry sharedStateRegistry) throws IOException {

    int numberOfStates = 4;
    CheckpointProperties props = CheckpointProperties.forCheckpoint(CheckpointRetentionPolicy.NEVER_RETAIN_AFTER_TERMINATION);

    OperatorID operatorID = new OperatorID();
    Map<OperatorID, OperatorState> operatorGroupState = new HashMap<>();
    OperatorState operatorState = new OperatorState(operatorID, numberOfStates, numberOfStates);
    operatorGroupState.put(operatorID, operatorState);

    for (int i = 0; i < numberOfStates; i++) {
        OperatorSubtaskState subtaskState = new TestOperatorSubtaskState();
        operatorState.putState(i, subtaskState);
    }

    operatorState.registerSharedStates(sharedStateRegistry);

    return new TestCompletedCheckpoint(new JobID(), id, 0, operatorGroupState, props);
}
Example #11
Source File: OperatorScopeFormat.java From Flink-CEPplus with Apache License 2.0
public String[] formatScope(TaskMetricGroup parent, OperatorID operatorID, String operatorName) {
    final String[] template = copyTemplate();
    final String[] values = {
        parent.parent().parent().hostname(),
        parent.parent().parent().taskManagerId(),
        valueOrNull(parent.parent().jobId()),
        valueOrNull(parent.parent().jobName()),
        valueOrNull(parent.vertexId()),
        valueOrNull(parent.executionId()),
        valueOrNull(parent.taskName()),
        String.valueOf(parent.subtaskIndex()),
        String.valueOf(parent.attemptNumber()),
        valueOrNull(operatorID),
        valueOrNull(operatorName)
    };
    return bindVariables(template, values);
}
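The values array binds the metric scope variables in a fixed order (hostname, task manager ID, job ID and name, vertex and execution IDs, task name, subtask index, attempt number, and finally the operator ID and name), so the operator ID is available as a template variable in custom scope formats.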
Example #12
Source File: KeyedStateInputFormatTest.java From flink with Apache License 2.0
@Test(expected = IOException.class)
public void testInvalidProcessReaderFunctionFails() throws Exception {
    OperatorID operatorID = OperatorIDGenerator.fromUid("uid");
    OperatorSubtaskState state = createOperatorSubtaskState(new StreamFlatMap<>(new StatefulFunction()));
    OperatorState operatorState = new OperatorState(operatorID, 1, 128);
    operatorState.putState(0, state);

    KeyedStateInputFormat<?, ?> format = new KeyedStateInputFormat<>(operatorState, new MemoryStateBackend(), Types.INT, new ReaderFunction());
    KeyGroupRangeInputSplit split = format.createInputSplits(1)[0];

    KeyedStateReaderFunction<Integer, Integer> userFunction = new InvalidReaderFunction();

    readInputSplit(split, userFunction);

    Assert.fail("KeyedStateReaderFunction did not fail on invalid RuntimeContext use");
}
Example #13
Source File: FlinkKafkaProducerTest.java From flink with Apache License 2.0
@Test
public void testOpenKafkaSerializationSchemaProducer() throws Exception {
    OpenTestingKafkaSerializationSchema schema = new OpenTestingKafkaSerializationSchema();
    Properties properties = new Properties();
    properties.put("bootstrap.servers", "localhost:9092");
    FlinkKafkaProducer<Integer> kafkaProducer = new FlinkKafkaProducer<>(
        "test-topic",
        schema,
        properties,
        FlinkKafkaProducer.Semantic.AT_LEAST_ONCE
    );

    OneInputStreamOperatorTestHarness<Integer, Object> testHarness = new OneInputStreamOperatorTestHarness<>(
        new StreamSink<>(kafkaProducer),
        1,
        1,
        0,
        IntSerializer.INSTANCE,
        new OperatorID(1, 1));

    testHarness.open();

    assertThat(schema.openCalled, equalTo(true));
}
Example #14
Source File: StreamSource.java From flink with Apache License 2.0
public LatencyMarksEmitter(
        final ProcessingTimeService processingTimeService,
        final Output<StreamRecord<OUT>> output,
        long latencyTrackingInterval,
        final OperatorID operatorId,
        final int subtaskIndex) {

    latencyMarkTimer = processingTimeService.scheduleWithFixedDelay(
        new ProcessingTimeCallback() {
            @Override
            public void onProcessingTime(long timestamp) throws Exception {
                try {
                    // ProcessingTimeService callbacks are executed under the checkpointing lock
                    output.emitLatencyMarker(new LatencyMarker(processingTimeService.getCurrentProcessingTime(), operatorId, subtaskIndex));
                } catch (Throwable t) {
                    // we catch the Throwables here so that we don't trigger the processing
                    // timer services async exception handler
                    LOG.warn("Error while emitting latency marker.", t);
                }
            }
        },
        0L,
        latencyTrackingInterval);
}
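Each LatencyMarker is stamped with the emitting operator's OperatorID and subtask index, which is what allows downstream latency statistics to attribute markers to their source (see the LatencyStatsTest example at the end of this page).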
Example #15
Source File: OperatorGroupTest.java From flink with Apache License 2.0
@Test
public void testGenerateScopeDefault() throws Exception {
    TaskManagerMetricGroup tmGroup = new TaskManagerMetricGroup(registry, "theHostName", "test-tm-id");
    TaskManagerJobMetricGroup jmGroup = new TaskManagerJobMetricGroup(registry, tmGroup, new JobID(), "myJobName");
    TaskMetricGroup taskGroup = new TaskMetricGroup(
        registry, jmGroup, new JobVertexID(), new AbstractID(), "aTaskName", 11, 0);
    OperatorMetricGroup opGroup = new OperatorMetricGroup(registry, taskGroup, new OperatorID(), "myOpName");

    assertArrayEquals(
        new String[] { "theHostName", "taskmanager", "test-tm-id", "myJobName", "myOpName", "11" },
        opGroup.getScopeComponents());

    assertEquals(
        "theHostName.taskmanager.test-tm-id.myJobName.myOpName.11.name",
        opGroup.getMetricIdentifier("name"));
}
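With the default scope format the OperatorID itself does not appear in the metric identifier, only the operator name and subtask index; compare the custom scope example from OperatorGroupTest further down, which binds the operator ID variable explicitly.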
Example #16
Source File: TestingClusterClient.java From flink with Apache License 2.0
@Override
public CompletableFuture<CoordinationResponse> sendCoordinationRequest(
        JobID jobId,
        OperatorID operatorId,
        CoordinationRequest request) {
    throw new UnsupportedOperationException();
}
Example #17
Source File: CompletedCheckpointStoreTest.java From flink with Apache License 2.0
public TestCompletedCheckpoint(
        JobID jobId,
        long checkpointId,
        long timestamp,
        Map<OperatorID, OperatorState> operatorGroupState,
        CheckpointProperties props) {

    super(jobId, checkpointId, timestamp, Long.MAX_VALUE, operatorGroupState, null, props,
        new TestCompletedCheckpointStorageLocation());
}
Example #18
Source File: OperatorCoordinatorSchedulerTest.java From flink with Apache License 2.0
@Test
public void testDeliveringClientRequestToNonExistingCoordinator() throws Exception {
    final OperatorCoordinator.Provider provider = new TestingOperatorCoordinator.Provider(testOperatorId);
    final DefaultScheduler scheduler = createScheduler(provider);

    final String payload = "testing payload";
    final TestingCoordinationRequestHandler.Request<String> request =
        new TestingCoordinationRequestHandler.Request<>(payload);

    CommonTestUtils.assertThrows(
        "does not exist",
        FlinkException.class,
        () -> scheduler.deliverCoordinationRequestToCoordinator(new OperatorID(), request));
}
Example #19
Source File: RpcTaskOperatorEventGateway.java From flink with Apache License 2.0
@Override
public void sendOperatorEventToCoordinator(OperatorID operator, SerializedValue<OperatorEvent> event) {
    final CompletableFuture<Acknowledge> result =
        rpcGateway.sendOperatorEventToCoordinator(taskExecutionId, operator, event);

    result.whenComplete((success, exception) -> {
        if (exception != null) {
            errorHandler.accept(exception);
        }
    });
}
Example #20
Source File: RemoteStreamEnvironmentTest.java From flink with Apache License 2.0
@Override
public CompletableFuture<CoordinationResponse> sendCoordinationRequest(
        JobID jobId,
        OperatorID operatorId,
        CoordinationRequest request) {
    return null;
}
Example #21
Source File: OperatorGroupTest.java From flink with Apache License 2.0
@Test
public void testGenerateScopeCustom() throws Exception {
    Configuration cfg = new Configuration();
    cfg.setString(MetricOptions.SCOPE_NAMING_OPERATOR, "<tm_id>.<job_id>.<task_id>.<operator_name>.<operator_id>");
    MetricRegistryImpl registry = new MetricRegistryImpl(MetricRegistryConfiguration.fromConfiguration(cfg));
    try {
        String tmID = "test-tm-id";
        JobID jid = new JobID();
        JobVertexID vertexId = new JobVertexID();
        OperatorID operatorID = new OperatorID();
        String operatorName = "operatorName";

        OperatorMetricGroup operatorGroup =
            new TaskManagerMetricGroup(registry, "theHostName", tmID)
                .addTaskForJob(jid, "myJobName", vertexId, new ExecutionAttemptID(), "aTaskname", 13, 2)
                .getOrAddOperator(operatorID, operatorName);

        assertArrayEquals(
            new String[]{tmID, jid.toString(), vertexId.toString(), operatorName, operatorID.toString()},
            operatorGroup.getScopeComponents());

        assertEquals(
            String.format("%s.%s.%s.%s.%s.name", tmID, jid, vertexId, operatorName, operatorID),
            operatorGroup.getMetricIdentifier("name"));
    } finally {
        registry.shutdown().get();
    }
}
Example #22
Source File: TaskStateSnapshotTest.java From flink with Apache License 2.0
@Test
public void getStateSize() {
    Random random = new Random(0x42);

    TaskStateSnapshot taskStateSnapshot = new TaskStateSnapshot();
    Assert.assertEquals(0, taskStateSnapshot.getStateSize());

    OperatorSubtaskState emptyOperatorSubtaskState = new OperatorSubtaskState();
    Assert.assertFalse(emptyOperatorSubtaskState.hasState());
    taskStateSnapshot.putSubtaskStateByOperatorID(new OperatorID(), emptyOperatorSubtaskState);
    Assert.assertEquals(0, taskStateSnapshot.getStateSize());

    OperatorStateHandle stateHandle_1 = StateHandleDummyUtil.createNewOperatorStateHandle(2, random);
    OperatorSubtaskState nonEmptyOperatorSubtaskState_1 = new OperatorSubtaskState(
        stateHandle_1,
        null,
        null,
        null
    );

    OperatorStateHandle stateHandle_2 = StateHandleDummyUtil.createNewOperatorStateHandle(2, random);
    OperatorSubtaskState nonEmptyOperatorSubtaskState_2 = new OperatorSubtaskState(
        null,
        stateHandle_2,
        null,
        null
    );

    taskStateSnapshot.putSubtaskStateByOperatorID(new OperatorID(), nonEmptyOperatorSubtaskState_1);
    taskStateSnapshot.putSubtaskStateByOperatorID(new OperatorID(), nonEmptyOperatorSubtaskState_2);

    long totalSize = stateHandle_1.getStateSize() + stateHandle_2.getStateSize();
    Assert.assertEquals(totalSize, taskStateSnapshot.getStateSize());
}
Example #23
Source File: DataSinkTask.java From flink with Apache License 2.0
/**
 * Initializes the OutputFormat implementation and configuration.
 *
 * @throws RuntimeException
 *         Thrown if the instance of the OutputFormat implementation cannot be obtained.
 */
private void initOutputFormat() {
    ClassLoader userCodeClassLoader = getUserCodeClassLoader();
    // obtain task configuration (including stub parameters)
    Configuration taskConf = getTaskConfiguration();
    this.config = new TaskConfig(taskConf);

    final Pair<OperatorID, OutputFormat<IT>> operatorIDAndOutputFormat;
    InputOutputFormatContainer formatContainer = new InputOutputFormatContainer(config, userCodeClassLoader);
    try {
        operatorIDAndOutputFormat = formatContainer.getUniqueOutputFormat();
        this.format = operatorIDAndOutputFormat.getValue();

        // check if the class is a subclass, if the check is required
        if (!OutputFormat.class.isAssignableFrom(this.format.getClass())) {
            throw new RuntimeException("The class '" + this.format.getClass().getName() + "' is not a subclass of '" +
                OutputFormat.class.getName() + "' as is required.");
        }
    } catch (ClassCastException ccex) {
        throw new RuntimeException("The stub class is not a proper subclass of " + OutputFormat.class.getName(), ccex);
    }

    Thread thread = Thread.currentThread();
    ClassLoader original = thread.getContextClassLoader();
    // configure the stub. catch exceptions here extra, to report them as originating from the user code
    try {
        thread.setContextClassLoader(userCodeClassLoader);
        this.format.configure(formatContainer.getParameters(operatorIDAndOutputFormat.getKey()));
    } catch (Throwable t) {
        throw new RuntimeException("The user defined 'configure()' method in the Output Format caused an error: " +
            t.getMessage(), t);
    } finally {
        thread.setContextClassLoader(original);
    }
}
Example #24
Source File: OneInputStreamTaskTest.java From flink with Apache License 2.0
@Test
public void testHandlingEndOfInput() throws Exception {
    final OneInputStreamTaskTestHarness<String, String> testHarness = new OneInputStreamTaskTestHarness<>(
        OneInputStreamTask::new,
        BasicTypeInfo.STRING_TYPE_INFO,
        BasicTypeInfo.STRING_TYPE_INFO);

    testHarness
        .setupOperatorChain(new OperatorID(), new TestBoundedOneInputStreamOperator("Operator0"))
        .chain(
            new OperatorID(),
            new TestBoundedOneInputStreamOperator("Operator1"),
            BasicTypeInfo.STRING_TYPE_INFO.createSerializer(new ExecutionConfig()))
        .finish();

    ConcurrentLinkedQueue<Object> expectedOutput = new ConcurrentLinkedQueue<>();

    testHarness.invoke();
    testHarness.waitForTaskRunning();

    testHarness.processElement(new StreamRecord<>("Hello"));
    testHarness.endInput();

    testHarness.waitForTaskCompletion();

    expectedOutput.add(new StreamRecord<>("Hello"));
    expectedOutput.add(new StreamRecord<>("[Operator0]: Bye"));
    expectedOutput.add(new StreamRecord<>("[Operator1]: Bye"));

    TestHarnessUtil.assertOutputEquals("Output was not correct.", expectedOutput, testHarness.getOutput());
}
Example #25
Source File: TaskTestBase.java From flink with Apache License 2.0
public void registerFileOutputTask(
        FileOutputFormat<Record> outputFormat,
        String outPath,
        Configuration formatParams) {

    outputFormat.setOutputFilePath(new Path(outPath));
    outputFormat.setWriteMode(WriteMode.OVERWRITE);

    OperatorID operatorID = new OperatorID();
    new InputOutputFormatContainer(Thread.currentThread().getContextClassLoader())
        .addOutputFormat(operatorID, outputFormat)
        .addParameters(operatorID, formatParams)
        .write(new TaskConfig(this.mockEnv.getTaskConfiguration()));
}
Example #26
Source File: OneInputStreamOperatorTestHarness.java From flink with Apache License 2.0
public OneInputStreamOperatorTestHarness(
        OneInputStreamOperator<IN, OUT> operator,
        int maxParallelism,
        int parallelism,
        int subtaskIndex,
        OperatorID operatorID) throws Exception {
    super(operator, maxParallelism, parallelism, subtaskIndex, operatorID);

    this.oneInputOperator = operator;
}
Example #27
Source File: CheckpointStateRestoreTest.java From flink with Apache License 2.0
private ExecutionJobVertex mockExecutionJobVertex(JobVertexID id, ExecutionVertex[] vertices) {
    ExecutionJobVertex vertex = mock(ExecutionJobVertex.class);
    when(vertex.getParallelism()).thenReturn(vertices.length);
    when(vertex.getMaxParallelism()).thenReturn(vertices.length);
    when(vertex.getJobVertexId()).thenReturn(id);
    when(vertex.getTaskVertices()).thenReturn(vertices);
    when(vertex.getOperatorIDs()).thenReturn(Collections.singletonList(OperatorIDPair.generatedIDOnly(OperatorID.fromJobVertexID(id))));

    for (ExecutionVertex v : vertices) {
        when(v.getJobVertex()).thenReturn(vertex);
    }

    return vertex;
}
Example #28
Source File: BootstrapTransformation.java From flink with Apache License 2.0
/**
 * @param operatorID The operator id for the stream operator.
 * @param stateBackend The state backend for the job.
 * @param globalMaxParallelism Global max parallelism set for the savepoint.
 * @param savepointPath The path where the savepoint will be written.
 * @return The operator subtask states for this bootstrap transformation.
 */
DataSet<OperatorState> writeOperatorState(
        OperatorID operatorID,
        StateBackend stateBackend,
        int globalMaxParallelism,
        Path savepointPath) {
    int localMaxParallelism = getMaxParallelism(globalMaxParallelism);

    return writeOperatorSubtaskStates(operatorID, stateBackend, savepointPath, localMaxParallelism)
        .reduceGroup(new OperatorSubtaskStateReducer(operatorID, localMaxParallelism))
        .name("reduce(OperatorSubtaskState)");
}
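The per-subtask snapshots produced by writeOperatorSubtaskStates are reduced into a single OperatorState keyed by the given OperatorID, the same per-operator unit that appears in the checkpoint-store examples above.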
Example #29
Source File: LatencyStatsTest.java From flink with Apache License 2.0
private static void assertName(final String registrationName, final OperatorID sourceId) {
    final String sanitizedName = sanitizeName(registrationName);
    Assert.assertEquals(
        "source_id." + sourceId +
        ".operator_id." + OPERATOR_ID +
        ".operator_subtask_index." + OPERATOR_SUBTASK_INDEX +
        ".latency",
        sanitizedName);
}
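The asserted metric name embeds two operator IDs: the source_id carried by the latency marker and the operator_id of the operator recording the statistic, matching the marker fields set in the StreamSource LatencyMarksEmitter example above.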