org.apache.flink.runtime.checkpoint.OperatorSubtaskState Java Examples
The following examples show how to use org.apache.flink.runtime.checkpoint.OperatorSubtaskState.
Each example notes its source file, originating project, and license.
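Most of the examples below share the same round-trip: take an OperatorSubtaskState snapshot from a test harness, then restore a fresh operator from it. The following minimal sketch illustrates that pattern; it assumes Flink's test utilities (the flink-streaming-java test artifact) are on the classpath, and the doubling MapFunction is only an illustrative stand-in, not taken from any of the examples.

import org.apache.flink.api.common.functions.MapFunction;
import org.apache.flink.runtime.checkpoint.OperatorSubtaskState;
import org.apache.flink.streaming.api.operators.StreamMap;
import org.apache.flink.streaming.util.OneInputStreamOperatorTestHarness;

public class OperatorSubtaskStateRoundTrip {

    public static void main(String[] args) throws Exception {
        MapFunction<Integer, Integer> fn = value -> value * 2;

        OneInputStreamOperatorTestHarness<Integer, Integer> harness =
            new OneInputStreamOperatorTestHarness<>(new StreamMap<>(fn));
        harness.setup();
        harness.open();
        harness.processElement(1, 0L);

        // Snapshot the operator's state (checkpoint id 0, timestamp 1).
        OperatorSubtaskState snapshot = harness.snapshot(0L, 1L);
        harness.close();

        // Restore a fresh operator instance from the captured state
        // before opening it, mirroring a checkpoint recovery.
        harness = new OneInputStreamOperatorTestHarness<>(new StreamMap<>(fn));
        harness.initializeState(snapshot);
        harness.open();
        harness.close();
    }
}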
Example #1
Source File: FlinkKafkaProducer011ITCase.java From flink with Apache License 2.0
@Test
public void testRecoverCommittedTransaction() throws Exception {
    String topic = "flink-kafka-producer-recover-committed-transaction";

    OneInputStreamOperatorTestHarness<Integer, Object> testHarness = createTestHarness(topic);
    testHarness.setup();
    testHarness.open(); // producerA - start transaction (txn) 0
    testHarness.processElement(42, 0); // producerA - write 42 in txn 0
    OperatorSubtaskState checkpoint0 = testHarness.snapshot(0, 1); // producerA - pre commit txn 0, producerB - start txn 1
    testHarness.processElement(43, 2); // producerB - write 43 in txn 1
    testHarness.notifyOfCompletedCheckpoint(0); // producerA - commit txn 0 and return to the pool
    testHarness.snapshot(1, 3); // producerB - pre commit txn 1, producerA - start txn 2
    testHarness.processElement(44, 4); // producerA - write 44 in txn 2
    testHarness.close(); // producerA - abort txn 2

    testHarness = createTestHarness(topic);
    testHarness.initializeState(checkpoint0); // recover state 0 - producerA recovers and commits txn 0
    testHarness.close();

    assertExactlyOnceForTopic(createProperties(), topic, 0, Arrays.asList(42));

    deleteTestTopic(topic);
    checkProducerLeak();
}
Example #2
Source File: KeyedStateInputFormatTest.java From flink with Apache License 2.0
@Test
public void testReadMultipleOutputPerKey() throws Exception {
    OperatorID operatorID = OperatorIDGenerator.fromUid("uid");
    OperatorSubtaskState state = createOperatorSubtaskState(new StreamFlatMap<>(new StatefulFunction()));
    OperatorState operatorState = new OperatorState(operatorID, 1, 128);
    operatorState.putState(0, state);

    KeyedStateInputFormat<?, ?, ?> format = new KeyedStateInputFormat<>(
        operatorState,
        new MemoryStateBackend(),
        new Configuration(),
        new KeyedStateReaderOperator<>(new ReaderFunction(), Types.INT));
    KeyGroupRangeInputSplit split = format.createInputSplits(1)[0];

    KeyedStateReaderFunction<Integer, Integer> userFunction = new DoubleReaderFunction();

    List<Integer> data = readInputSplit(split, userFunction);

    Assert.assertEquals("Incorrect data read from input split", Arrays.asList(1, 1, 2, 2, 3, 3), data);
}
Example #3
Source File: FlinkKafkaProducerITCase.java From Flink-CEPplus with Apache License 2.0
@Test
public void testRecoverCommittedTransaction() throws Exception {
    String topic = "flink-kafka-producer-recover-committed-transaction";

    OneInputStreamOperatorTestHarness<Integer, Object> testHarness = createTestHarness(topic);
    testHarness.setup();
    testHarness.open(); // producerA - start transaction (txn) 0
    testHarness.processElement(42, 0); // producerA - write 42 in txn 0
    OperatorSubtaskState checkpoint0 = testHarness.snapshot(0, 1); // producerA - pre commit txn 0, producerB - start txn 1
    testHarness.processElement(43, 2); // producerB - write 43 in txn 1
    testHarness.notifyOfCompletedCheckpoint(0); // producerA - commit txn 0 and return to the pool
    testHarness.snapshot(1, 3); // producerB - pre commit txn 1, producerA - start txn 2
    testHarness.processElement(44, 4); // producerA - write 44 in txn 2
    testHarness.close(); // producerA - abort txn 2

    testHarness = createTestHarness(topic);
    testHarness.initializeState(checkpoint0); // recover state 0 - producerA recovers and commits txn 0
    testHarness.close();

    assertExactlyOnceForTopic(createProperties(), topic, 0, Arrays.asList(42));

    deleteTestTopic(topic);
    checkProducerLeak();
}
Example #4
Source File: FlinkKafkaProducerITCase.java From flink with Apache License 2.0
@Test
public void testRecoverCommittedTransaction() throws Exception {
    String topic = "flink-kafka-producer-recover-committed-transaction";

    OneInputStreamOperatorTestHarness<Integer, Object> testHarness = createTestHarness(topic);
    testHarness.setup();
    testHarness.open(); // producerA - start transaction (txn) 0
    testHarness.processElement(42, 0); // producerA - write 42 in txn 0
    OperatorSubtaskState checkpoint0 = testHarness.snapshot(0, 1); // producerA - pre commit txn 0, producerB - start txn 1
    testHarness.processElement(43, 2); // producerB - write 43 in txn 1
    testHarness.notifyOfCompletedCheckpoint(0); // producerA - commit txn 0 and return to the pool
    testHarness.snapshot(1, 3); // producerB - pre commit txn 1, producerA - start txn 2
    testHarness.processElement(44, 4); // producerA - write 44 in txn 2
    testHarness.close(); // producerA - abort txn 2

    testHarness = createTestHarness(topic);
    testHarness.initializeState(checkpoint0); // recover state 0 - producerA recovers and commits txn 0
    testHarness.close();

    assertExactlyOnceForTopic(createProperties(), topic, 0, Arrays.asList(42));

    deleteTestTopic(topic);
    checkProducerLeak();
}
Example #5
Source File: FailoverRegionTest.java From flink with Apache License 2.0
/**
 * Lets the checkpoint coordinator receive all acknowledgements from the given execution
 * vertices so that the expected checkpoint completes.
 */
private void acknowledgeAllCheckpoints(
        CheckpointCoordinator checkpointCoordinator,
        Iterator<ExecutionVertex> executionVertexes) throws IOException, CheckpointException {
    while (executionVertexes.hasNext()) {
        ExecutionVertex executionVertex = executionVertexes.next();
        for (int index = 0; index < executionVertex.getJobVertex().getParallelism(); index++) {
            JobVertexID jobVertexID = executionVertex.getJobvertexId();
            OperatorStateHandle opStateBackend = CheckpointCoordinatorTest.generatePartitionableStateHandle(jobVertexID, index, 2, 8, false);
            OperatorSubtaskState operatorSubtaskState = new OperatorSubtaskState(opStateBackend, null, null, null);
            TaskStateSnapshot taskOperatorSubtaskStates = new TaskStateSnapshot();
            taskOperatorSubtaskStates.putSubtaskStateByOperatorID(OperatorID.fromJobVertexID(jobVertexID), operatorSubtaskState);

            AcknowledgeCheckpoint acknowledgeCheckpoint = new AcknowledgeCheckpoint(
                executionVertex.getJobId(),
                executionVertex.getJobVertex().getTaskVertices()[index].getCurrentExecutionAttempt().getAttemptId(),
                checkpointId,
                new CheckpointMetrics(),
                taskOperatorSubtaskStates);

            checkpointCoordinator.receiveAcknowledgeMessage(acknowledgeCheckpoint, "Unknown location");
        }
    }
}
Example #6
Source File: AbstractStreamOperatorTestHarnessTest.java From flink with Apache License 2.0
@Test
public void testInitializeAfterOpenning() throws Throwable {
    expectedException.expect(IllegalStateException.class);
    expectedException.expectMessage(containsString("TestHarness has already been initialized."));
    AbstractStreamOperatorTestHarness<Integer> result;
    result = new AbstractStreamOperatorTestHarness<>(
        new AbstractStreamOperator<Integer>() {
        },
        1,
        1,
        0);
    result.setup();
    result.open();
    result.initializeState(new OperatorSubtaskState());
}
Example #7
Source File: JobMasterTest.java From Flink-CEPplus with Apache License 2.0
private Collection<OperatorState> createOperatorState(OperatorID... operatorIds) {
    Collection<OperatorState> operatorStates = new ArrayList<>(operatorIds.length);

    for (OperatorID operatorId : operatorIds) {
        final OperatorState operatorState = new OperatorState(operatorId, 1, 42);
        final OperatorSubtaskState subtaskState = new OperatorSubtaskState(
            new OperatorStreamStateHandle(
                Collections.emptyMap(),
                new ByteStreamStateHandle("foobar", new byte[0])),
            null,
            null,
            null);
        operatorState.putState(0, subtaskState);
        operatorStates.add(operatorState);
    }

    return operatorStates;
}
Example #8
Source File: KafkaMigrationTestBase.java From flink with Apache License 2.0
private OperatorSubtaskState initializeTestState() throws Exception {
    try (OneInputStreamOperatorTestHarness testHarness = createTestHarness()) {
        testHarness.setup();
        testHarness.open();

        // Create a committed transaction
        testHarness.processElement(42, 0L);

        // TODO: when stop with savepoint is available, replace this code with it (with stop with savepoint
        // there won't be any pending transactions)
        OperatorSubtaskState snapshot = testHarness.snapshot(0L, 1L);
        // We kind of simulate stop with savepoint by making sure that notifyOfCompletedCheckpoint is called
        testHarness.notifyOfCompletedCheckpoint(0L);

        // Create a Pending transaction
        testHarness.processElement(43, 2L);
        return snapshot;
    }
}
Example #9
Source File: KeyedStateInputFormatTest.java From flink with Apache License 2.0
@Test
public void testReadTime() throws Exception {
    OperatorID operatorID = OperatorIDGenerator.fromUid("uid");
    OperatorSubtaskState state = createOperatorSubtaskState(new KeyedProcessOperator<>(new StatefulFunctionWithTime()));
    OperatorState operatorState = new OperatorState(operatorID, 1, 128);
    operatorState.putState(0, state);

    KeyedStateInputFormat<?, ?, ?> format = new KeyedStateInputFormat<>(
        operatorState,
        new MemoryStateBackend(),
        new Configuration(),
        new KeyedStateReaderOperator<>(new TimerReaderFunction(), Types.INT));
    KeyGroupRangeInputSplit split = format.createInputSplits(1)[0];

    KeyedStateReaderFunction<Integer, Integer> userFunction = new TimerReaderFunction();

    List<Integer> data = readInputSplit(split, userFunction);

    Assert.assertEquals("Incorrect data read from input split", Arrays.asList(1, 1, 2, 2, 3, 3), data);
}
Example #10
Source File: JobMasterTest.java From flink with Apache License 2.0
private Collection<OperatorState> createOperatorState(OperatorID... operatorIds) {
    Random random = new Random();
    Collection<OperatorState> operatorStates = new ArrayList<>(operatorIds.length);

    for (OperatorID operatorId : operatorIds) {
        final OperatorState operatorState = new OperatorState(operatorId, 1, 42);
        final OperatorSubtaskState subtaskState = new OperatorSubtaskState(
            new OperatorStreamStateHandle(Collections.emptyMap(), new ByteStreamStateHandle("foobar", new byte[0])),
            null,
            null,
            null,
            singleton(createNewInputChannelStateHandle(10, random)),
            singleton(createNewResultSubpartitionStateHandle(10, random)));
        operatorState.putState(0, subtaskState);
        operatorStates.add(operatorState);
    }

    return operatorStates;
}
Example #11
Source File: MetadataSerializer.java From flink-learning with Apache License 2.0
/**
 * Parses the ManagedKeyedState of an operatorSubtaskState.
 *
 * @param operatorSubtaskState operatorSubtaskState
 */
private static void parseManagedKeyedState(OperatorSubtaskState operatorSubtaskState) {
    // Iterate over the KeyedState of the current subtask
    for (KeyedStateHandle keyedStateHandle : operatorSubtaskState.getManagedKeyedState()) {
        // This case targets a problem caused by Flink RocksDB incremental checkpoints,
        // so only IncrementalRemoteKeyedStateHandle is handled here
        if (keyedStateHandle instanceof IncrementalRemoteKeyedStateHandle) {
            // Get the RocksDB sharedState
            Map<StateHandleID, StreamStateHandle> sharedState =
                ((IncrementalRemoteKeyedStateHandle) keyedStateHandle).getSharedState();
            // Iterate over all sst files in sharedState; the key is the sst file name,
            // the value is the handle of the corresponding HDFS file
            for (Map.Entry<StateHandleID, StreamStateHandle> entry : sharedState.entrySet()) {
                // Print the sst file name
                System.out.println("sstable file name: " + entry.getKey());
                if (entry.getValue() instanceof FileStateHandle) {
                    Path filePath = ((FileStateHandle) entry.getValue()).getFilePath();
                    // Print the HDFS location of the sst file
                    System.out.println("HDFS location of the sstable file: " + filePath.getPath());
                }
            }
        }
    }
}
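For context, the OperatorSubtaskState instances passed into parseManagedKeyedState can be obtained by deserializing a checkpoint's _metadata file. The following is a hedged sketch of that step, assuming the Flink 1.10-era Checkpoints.loadCheckpointMetadata(DataInputStream, ClassLoader) API and a hypothetical local metadata path; the exact signature varies across Flink versions.

// Sketch: load a checkpoint _metadata file and feed each subtask state to
// parseManagedKeyedState above. The path is hypothetical, and the
// loadCheckpointMetadata signature is the Flink 1.10-era one.
try (DataInputStream in = new DataInputStream(new FileInputStream("/tmp/checkpoint/_metadata"))) {
    Savepoint savepoint = Checkpoints.loadCheckpointMetadata(in, MetadataSerializer.class.getClassLoader());
    for (OperatorState operatorState : savepoint.getOperatorStates()) {
        for (OperatorSubtaskState operatorSubtaskState : operatorState.getStates()) {
            parseManagedKeyedState(operatorSubtaskState);
        }
    }
}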
Example #12
Source File: KeyedStateBootstrapOperatorTest.java From flink with Apache License 2.0
private <T> KeyedOneInputStreamOperatorTestHarness<Long, Long, T> getHarness(
        OneInputStreamOperator<Long, T> bootstrapOperator,
        OperatorSubtaskState state) throws Exception {
    KeyedOneInputStreamOperatorTestHarness<Long, Long, T> harness =
        new KeyedOneInputStreamOperatorTestHarness<>(bootstrapOperator, id -> id, Types.LONG, 128, 1, 0);

    harness.setStateBackend(new RocksDBStateBackend(folder.newFolder().toURI()));
    if (state != null) {
        harness.initializeState(state);
    }
    harness.open();
    return harness;
}
Example #13
Source File: ListCheckpointedTest.java From flink with Apache License 2.0
private static void testUDF(TestUserFunction userFunction) throws Exception {
    OperatorSubtaskState snapshot;
    try (AbstractStreamOperatorTestHarness<Integer> testHarness = createTestHarness(userFunction)) {
        testHarness.open();
        snapshot = testHarness.snapshot(0L, 0L);
        assertFalse(userFunction.isRestored());
    }
    try (AbstractStreamOperatorTestHarness<Integer> testHarness = createTestHarness(userFunction)) {
        testHarness.initializeState(snapshot);
        testHarness.open();
        assertTrue(userFunction.isRestored());
    }
}
Example #14
Source File: UnionStateInputFormatTest.java From flink with Apache License 2.0
@Test
public void testReadUnionOperatorState() throws Exception {
    try (OneInputStreamOperatorTestHarness<Integer, Void> testHarness = getTestHarness()) {
        testHarness.open();

        testHarness.processElement(1, 0);
        testHarness.processElement(2, 0);
        testHarness.processElement(3, 0);

        OperatorSubtaskState subtaskState = testHarness.snapshot(0, 0);
        OperatorState state = new OperatorState(OperatorIDGenerator.fromUid("uid"), 1, 4);
        state.putState(0, subtaskState);

        OperatorStateInputSplit split = new OperatorStateInputSplit(subtaskState.getManagedOperatorState(), 0);

        UnionStateInputFormat<Integer> format = new UnionStateInputFormat<>(state, descriptor);
        format.setRuntimeContext(new MockStreamingRuntimeContext(false, 1, 0));
        format.open(split);

        List<Integer> results = new ArrayList<>();
        while (!format.reachedEnd()) {
            results.add(format.nextRecord(0));
        }

        results.sort(Comparator.naturalOrder());

        Assert.assertEquals("Failed to read correct list state from state backend", Arrays.asList(1, 2, 3), results);
    }
}
Example #15
Source File: CoBroadcastWithNonKeyedOperatorTest.java From flink with Apache License 2.0
private static OperatorSubtaskState repartitionInitState(
        final OperatorSubtaskState initState,
        final int numKeyGroups,
        final int oldParallelism,
        final int newParallelism,
        final int subtaskIndex) {
    return AbstractStreamOperatorTestHarness.repartitionOperatorState(initState, numKeyGroups, oldParallelism, newParallelism, subtaskIndex);
}
Example #16
Source File: SavepointV2Serializer.java From flink with Apache License 2.0
private static OperatorSubtaskState deserializeSubtaskState(DataInputStream dis) throws IOException {
    // Duration field has been removed from SubtaskState, do not remove
    long ignoredDuration = dis.readLong();

    // for compatibility, do not remove
    int len = dis.readInt();

    if (SavepointSerializers.FAIL_WHEN_LEGACY_STATE_DETECTED) {
        Preconditions.checkState(len == 0,
            "Legacy state (from Flink <= 1.1, created through the 'Checkpointed' interface) is " +
                "no longer supported starting from Flink 1.4. Please rewrite your job to use " +
                "'CheckpointedFunction' instead!");
    } else {
        for (int i = 0; i < len; ++i) {
            // absorb bytes from stream and ignore result
            deserializeStreamStateHandle(dis);
        }
    }

    len = dis.readInt();
    OperatorStateHandle operatorStateBackend = len == 0 ? null : deserializeOperatorStateHandle(dis);

    len = dis.readInt();
    OperatorStateHandle operatorStateStream = len == 0 ? null : deserializeOperatorStateHandle(dis);

    KeyedStateHandle keyedStateBackend = deserializeKeyedStateHandle(dis);

    KeyedStateHandle keyedStateStream = deserializeKeyedStateHandle(dis);

    return new OperatorSubtaskState(
        operatorStateBackend,
        operatorStateStream,
        keyedStateBackend,
        keyedStateStream);
}
Example #17
Source File: OperatorSnapshotFinalizer.java From flink with Apache License 2.0
public OperatorSnapshotFinalizer(
        @Nonnull OperatorSnapshotFutures snapshotFutures) throws ExecutionException, InterruptedException {

    SnapshotResult<KeyedStateHandle> keyedManaged =
        FutureUtils.runIfNotDoneAndGet(snapshotFutures.getKeyedStateManagedFuture());

    SnapshotResult<KeyedStateHandle> keyedRaw =
        FutureUtils.runIfNotDoneAndGet(snapshotFutures.getKeyedStateRawFuture());

    SnapshotResult<OperatorStateHandle> operatorManaged =
        FutureUtils.runIfNotDoneAndGet(snapshotFutures.getOperatorStateManagedFuture());

    SnapshotResult<OperatorStateHandle> operatorRaw =
        FutureUtils.runIfNotDoneAndGet(snapshotFutures.getOperatorStateRawFuture());

    SnapshotResult<StateObjectCollection<InputChannelStateHandle>> inputChannel =
        snapshotFutures.getInputChannelStateFuture().get();

    SnapshotResult<StateObjectCollection<ResultSubpartitionStateHandle>> resultSubpartition =
        snapshotFutures.getResultSubpartitionStateFuture().get();

    jobManagerOwnedState = new OperatorSubtaskState(
        operatorManaged.getJobManagerOwnedSnapshot(),
        operatorRaw.getJobManagerOwnedSnapshot(),
        keyedManaged.getJobManagerOwnedSnapshot(),
        keyedRaw.getJobManagerOwnedSnapshot(),
        inputChannel.getJobManagerOwnedSnapshot(),
        resultSubpartition.getJobManagerOwnedSnapshot()
    );

    taskLocalState = new OperatorSubtaskState(
        operatorManaged.getTaskLocalSnapshot(),
        operatorRaw.getTaskLocalSnapshot(),
        keyedManaged.getTaskLocalSnapshot(),
        keyedRaw.getTaskLocalSnapshot(),
        inputChannel.getTaskLocalSnapshot(),
        resultSubpartition.getTaskLocalSnapshot()
    );
}
Example #18
Source File: KeyedStateInputFormatTest.java From flink with Apache License 2.0
private OperatorSubtaskState createOperatorSubtaskState(OneInputStreamOperator<Integer, Void> operator) throws Exception {
    try (KeyedOneInputStreamOperatorTestHarness<Integer, Integer, Void> testHarness =
             new KeyedOneInputStreamOperatorTestHarness<>(operator, id -> id, Types.INT, 128, 1, 0)) {

        testHarness.setup(VoidSerializer.INSTANCE);
        testHarness.open();

        testHarness.processElement(1, 0);
        testHarness.processElement(2, 0);
        testHarness.processElement(3, 0);

        return testHarness.snapshot(0, 0);
    }
}
Example #19
Source File: TwoPhaseCommitSinkFunctionTest.java From flink with Apache License 2.0
@Test
public void testIgnoreCommitExceptionDuringRecovery() throws Exception {
    clock.setEpochMilli(0);

    harness.open();
    harness.processElement("42", 0);

    final OperatorSubtaskState snapshot = harness.snapshot(0, 1);
    harness.notifyOfCompletedCheckpoint(1);

    throwException.set(true);

    closeTestHarness();
    setUpTestHarness();

    final long transactionTimeout = 1000;
    sinkFunction.setTransactionTimeout(transactionTimeout);
    sinkFunction.ignoreFailuresAfterTransactionTimeout();

    try {
        harness.initializeState(snapshot);
        fail("Expected exception not thrown");
    } catch (RuntimeException e) {
        assertEquals("Expected exception", e.getMessage());
    }

    clock.setEpochMilli(transactionTimeout + 1);
    harness.initializeState(snapshot);

    assertExactlyOnce(Collections.singletonList("42"));
}
Example #20
Source File: SnapshotUtils.java From flink with Apache License 2.0
public static <OUT, OP extends StreamOperator<OUT>> TaggedOperatorSubtaskState snapshot(
        OP operator,
        int index,
        long timestamp,
        boolean isExactlyOnceMode,
        boolean isUnalignedCheckpoint,
        CheckpointStorageWorkerView checkpointStorage,
        Path savepointPath) throws Exception {

    CheckpointOptions options = new CheckpointOptions(
        CheckpointType.SAVEPOINT,
        AbstractFsCheckpointStorage.encodePathAsReference(savepointPath),
        isExactlyOnceMode,
        isUnalignedCheckpoint);

    operator.prepareSnapshotPreBarrier(CHECKPOINT_ID);

    CheckpointStreamFactory storage = checkpointStorage.resolveCheckpointStorageLocation(
        CHECKPOINT_ID,
        options.getTargetLocation());

    OperatorSnapshotFutures snapshotInProgress = operator.snapshotState(
        CHECKPOINT_ID,
        timestamp,
        options,
        storage);

    OperatorSubtaskState state = new OperatorSnapshotFinalizer(snapshotInProgress).getJobManagerOwnedState();

    operator.notifyCheckpointComplete(CHECKPOINT_ID);

    return new TaggedOperatorSubtaskState(index, state);
}
Example #21
Source File: FlinkKafkaProducerITCase.java From Flink-CEPplus with Apache License 2.0
private void testRecoverWithChangeSemantics(
        String topic,
        FlinkKafkaProducer.Semantic fromSemantic,
        FlinkKafkaProducer.Semantic toSemantic) throws Exception {
    OperatorSubtaskState producerSnapshot;
    try (OneInputStreamOperatorTestHarness<Integer, Object> testHarness = createTestHarness(topic, fromSemantic)) {
        testHarness.setup();
        testHarness.open();
        testHarness.processElement(42, 0);
        testHarness.snapshot(0, 1);
        testHarness.processElement(43, 2);
        testHarness.notifyOfCompletedCheckpoint(0);
        producerSnapshot = testHarness.snapshot(1, 3);
        testHarness.processElement(44, 4);
    }

    try (OneInputStreamOperatorTestHarness<Integer, Object> testHarness = createTestHarness(topic, toSemantic)) {
        testHarness.setup();
        testHarness.initializeState(producerSnapshot);
        testHarness.open();
        testHarness.processElement(45, 7);
        testHarness.snapshot(2, 8);
        testHarness.processElement(46, 9);
        testHarness.notifyOfCompletedCheckpoint(2);
        testHarness.processElement(47, 9);
    }

    checkProducerLeak();
}
Example #22
Source File: TaskLocalStateStoreImplTest.java From flink with Apache License 2.0
private List<TaskStateSnapshot> storeStates(int count) {
    List<TaskStateSnapshot> taskStateSnapshots = new ArrayList<>(count);
    for (int i = 0; i < count; ++i) {
        OperatorID operatorID = new OperatorID();
        TaskStateSnapshot taskStateSnapshot = spy(new TaskStateSnapshot());
        OperatorSubtaskState operatorSubtaskState = new OperatorSubtaskState();
        taskStateSnapshot.putSubtaskStateByOperatorID(operatorID, operatorSubtaskState);
        taskLocalStateStore.storeLocalState(i, taskStateSnapshot);
        taskStateSnapshots.add(taskStateSnapshot);
    }
    return taskStateSnapshots;
}
Example #23
Source File: KafkaMigrationTestBase.java From flink with Apache License 2.0
/**
 * Manually run this to write binary snapshot data.
 */
@Ignore
@Test
public void writeSnapshot() throws Exception {
    try {
        checkState(flinkGenerateSavepointVersion.isPresent());
        startClusters();

        OperatorSubtaskState snapshot = initializeTestState();
        OperatorSnapshotUtil.writeStateHandle(snapshot, getOperatorSnapshotPath(flinkGenerateSavepointVersion.get()));
    } finally {
        shutdownClusters();
    }
}
Example #24
Source File: TwoPhaseCommitSinkFunctionTest.java From Flink-CEPplus with Apache License 2.0
@Test
public void testLogTimeoutAlmostReachedWarningDuringRecovery() throws Exception {
    clock.setEpochMilli(0);

    final long transactionTimeout = 1000;
    final double warningRatio = 0.5;
    sinkFunction.setTransactionTimeout(transactionTimeout);
    sinkFunction.enableTransactionTimeoutWarnings(warningRatio);

    harness.open();

    final OperatorSubtaskState snapshot = harness.snapshot(0, 1);
    final long elapsedTime = (long) ((double) transactionTimeout * warningRatio + 2);
    clock.setEpochMilli(elapsedTime);

    closeTestHarness();
    setUpTestHarness();
    sinkFunction.setTransactionTimeout(transactionTimeout);
    sinkFunction.enableTransactionTimeoutWarnings(warningRatio);

    harness.initializeState(snapshot);
    harness.open();

    final List<String> logMessages =
        loggingEvents.stream().map(LoggingEvent::getRenderedMessage).collect(Collectors.toList());

    closeTestHarness();

    assertThat(
        logMessages,
        hasItem(containsString("has been open for 502 ms. " +
            "This is close to or even exceeding the transaction timeout of 1000 ms.")));
}
Example #25
Source File: CEPMigrationTest.java From flink with Apache License 2.0
/**
 * Manually run this to write binary snapshot data.
 */
@Ignore
@Test
public void writeAndOrSubtypConditionsPatternAfterMigrationSnapshot() throws Exception {

    KeySelector<Event, Integer> keySelector = new KeySelector<Event, Integer>() {
        private static final long serialVersionUID = -4873366487571254798L;

        @Override
        public Integer getKey(Event value) throws Exception {
            return value.getId();
        }
    };

    final Event startEvent1 = new SubEvent(42, "start", 1.0, 6.0);

    OneInputStreamOperatorTestHarness<Event, Map<String, List<Event>>> harness =
        new KeyedOneInputStreamOperatorTestHarness<>(
            getKeyedCepOpearator(false, new NFAComplexConditionsFactory()),
            keySelector,
            BasicTypeInfo.INT_TYPE_INFO);

    try {
        harness.setup();
        harness.open();

        harness.processElement(new StreamRecord<>(startEvent1, 5));
        harness.processWatermark(new Watermark(6));

        // do snapshot and save to file
        OperatorSubtaskState snapshot = harness.snapshot(0L, 0L);
        OperatorSnapshotUtil.writeStateHandle(snapshot,
            "src/test/resources/cep-migration-conditions-flink" + flinkGenerateSavepointVersion + "-snapshot");
    } finally {
        harness.close();
    }
}
Example #26
Source File: KeyGroupRangeInputSplit.java From flink with Apache License 2.0
public PrioritizedOperatorSubtaskState getPrioritizedOperatorSubtaskState() {
    return new PrioritizedOperatorSubtaskState.Builder(
        new OperatorSubtaskState(
            StateObjectCollection.empty(),
            StateObjectCollection.empty(),
            new StateObjectCollection<>(managedKeyedState),
            new StateObjectCollection<>(rawKeyedState)
        ),
        Collections.emptyList()
    ).build();
}
Example #27
Source File: AbstractStreamOperatorTestHarness.java From flink with Apache License 2.0
/**
 * Calls {@link org.apache.flink.streaming.api.operators.StreamOperator#initializeState()}.
 * Calls {@link org.apache.flink.streaming.api.operators.SetupableStreamOperator#setup(StreamTask, StreamConfig, Output)}
 * if it was not called before.
 *
 * @param jmOperatorStateHandles the primary state (owned by JM)
 * @param tmOperatorStateHandles the (optional) local state (owned by TM) or null.
 * @throws Exception
 */
public void initializeState(
        OperatorSubtaskState jmOperatorStateHandles,
        OperatorSubtaskState tmOperatorStateHandles) throws Exception {
    checkState(!initializeCalled, "TestHarness has already been initialized. Have you " +
        "opened this harness before initializing it?");
    if (!setupCalled) {
        setup();
    }

    if (jmOperatorStateHandles != null) {
        TaskStateSnapshot jmTaskStateSnapshot = new TaskStateSnapshot();
        jmTaskStateSnapshot.putSubtaskStateByOperatorID(operator.getOperatorID(), jmOperatorStateHandles);

        taskStateManager.setReportedCheckpointId(0);
        taskStateManager.setJobManagerTaskStateSnapshotsByCheckpointId(
            Collections.singletonMap(0L, jmTaskStateSnapshot));

        if (tmOperatorStateHandles != null) {
            TaskStateSnapshot tmTaskStateSnapshot = new TaskStateSnapshot();
            tmTaskStateSnapshot.putSubtaskStateByOperatorID(operator.getOperatorID(), tmOperatorStateHandles);
            taskStateManager.setTaskManagerTaskStateSnapshotsByCheckpointId(
                Collections.singletonMap(0L, tmTaskStateSnapshot));
        }
    }

    operator.initializeState();
    initializeCalled = true;
}
Example #28
Source File: BucketingSinkMigrationTest.java From flink with Apache License 2.0
/**
 * Manually run this to write binary snapshot data. Remove @Ignore to run.
 */
@Ignore
@Test
public void writeSnapshot() throws Exception {

    final File outDir = tempFolder.newFolder();

    BucketingSink<String> sink = new BucketingSink<String>(outDir.getAbsolutePath())
        .setWriter(new StringWriter<String>())
        .setBatchSize(5)
        .setPartPrefix(PART_PREFIX)
        .setInProgressPrefix("")
        .setPendingPrefix("")
        .setValidLengthPrefix("")
        .setInProgressSuffix(IN_PROGRESS_SUFFIX)
        .setPendingSuffix(PENDING_SUFFIX)
        .setValidLengthSuffix(VALID_LENGTH_SUFFIX);

    OneInputStreamOperatorTestHarness<String, Object> testHarness =
        new OneInputStreamOperatorTestHarness<>(new StreamSink<>(sink));

    testHarness.setup();
    testHarness.open();

    testHarness.processElement(new StreamRecord<>("test1", 0L));
    testHarness.processElement(new StreamRecord<>("test2", 0L));

    checkLocalFs(outDir, 1, 1, 0, 0);

    testHarness.processElement(new StreamRecord<>("test3", 0L));
    testHarness.processElement(new StreamRecord<>("test4", 0L));
    testHarness.processElement(new StreamRecord<>("test5", 0L));

    checkLocalFs(outDir, 1, 4, 0, 0);

    OperatorSubtaskState snapshot = testHarness.snapshot(0L, 0L);

    OperatorSnapshotUtil.writeStateHandle(snapshot,
        "src/test/resources/bucketing-sink-migration-test-flink" + flinkGenerateSavepointVersion + "-snapshot");

    testHarness.close();
}