Java Code Examples for org.apache.flink.streaming.util.AbstractStreamOperatorTestHarness#setTimeCharacteristic()
The following examples show how to use org.apache.flink.streaming.util.AbstractStreamOperatorTestHarness#setTimeCharacteristic().
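All of the examples share the same basic pattern: wrap a SourceFunction in a StreamSource operator, build the harness with (operator, maxParallelism, parallelism, subtaskIndex), call setTimeCharacteristic() before setup() and open(), and then drive the operator. The sketch below condenses that pattern; it is a minimal illustration rather than code from any of the listed projects, and the SimpleSource class and test name are placeholders.

import org.apache.flink.streaming.api.TimeCharacteristic;
import org.apache.flink.streaming.api.functions.source.SourceFunction;
import org.apache.flink.streaming.api.operators.StreamSource;
import org.apache.flink.streaming.util.AbstractStreamOperatorTestHarness;
import org.junit.Test;

public class SetTimeCharacteristicSketchTest {

    // Placeholder source that emits nothing; any SourceFunction would do here.
    private static class SimpleSource implements SourceFunction<String> {
        @Override
        public void run(SourceContext<String> ctx) {}

        @Override
        public void cancel() {}
    }

    @Test
    public void sketchOfHarnessSetup() throws Exception {
        StreamSource<String, SimpleSource> operator = new StreamSource<>(new SimpleSource());

        // arguments: operator, maxParallelism, parallelism, subtaskIndex
        AbstractStreamOperatorTestHarness<String> harness =
            new AbstractStreamOperatorTestHarness<>(operator, 1, 1, 0);

        // set the time characteristic before setup()/open(), as the examples below do;
        // most of them use ProcessingTime, the createTestHarness() helpers use EventTime
        harness.setTimeCharacteristic(TimeCharacteristic.ProcessingTime);

        harness.setup();
        harness.open();

        // ... exercise the operator, take or restore snapshots, assert on state ...

        harness.close();
    }
}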
Example 1
Source File: FlinkKafkaConsumerBaseMigrationTest.java From Flink-CEPplus with Apache License 2.0
/**
 * Test restoring from a legacy empty state, when no partitions could be found for topics.
 */
@Test
public void testRestoreFromEmptyStateNoPartitions() throws Exception {
    final DummyFlinkKafkaConsumer<String> consumerFunction =
        new DummyFlinkKafkaConsumer<>(
            Collections.singletonList("dummy-topic"),
            Collections.<KafkaTopicPartition>emptyList(),
            FlinkKafkaConsumerBase.PARTITION_DISCOVERY_DISABLED);

    StreamSource<String, DummyFlinkKafkaConsumer<String>> consumerOperator = new StreamSource<>(consumerFunction);

    final AbstractStreamOperatorTestHarness<String> testHarness =
        new AbstractStreamOperatorTestHarness<>(consumerOperator, 1, 1, 0);

    testHarness.setTimeCharacteristic(TimeCharacteristic.ProcessingTime);
    testHarness.setup();

    // restore state from binary snapshot file
    testHarness.initializeState(
        OperatorSnapshotUtil.getResourceFilename(
            "kafka-consumer-migration-test-flink" + testMigrateVersion + "-empty-state-snapshot"));
    testHarness.open();

    // assert that no partitions were found and the subscribed partition map is empty
    assertTrue(consumerFunction.getSubscribedPartitionsToStartOffsets() != null);
    assertTrue(consumerFunction.getSubscribedPartitionsToStartOffsets().isEmpty());

    // assert that no state was restored
    assertTrue(consumerFunction.getRestoredState().isEmpty());

    consumerOperator.close();
    consumerOperator.cancel();
}
Example 2
Source File: FlinkKafkaConsumerBaseTest.java From flink with Apache License 2.0
private static <T> AbstractStreamOperatorTestHarness<T> createTestHarness(
        SourceFunction<T> source, int numSubtasks, int subtaskIndex) throws Exception {

    AbstractStreamOperatorTestHarness<T> testHarness =
        new AbstractStreamOperatorTestHarness<>(
            new StreamSource<>(source), maxParallelism, numSubtasks, subtaskIndex);

    testHarness.setTimeCharacteristic(TimeCharacteristic.EventTime);

    return testHarness;
}
Example 3
Source File: FlinkKafkaConsumerBaseMigrationTest.java From Flink-CEPplus with Apache License 2.0
/**
 * Test restoring from a non-empty state taken using a previous Flink version, when some partitions could be
 * found for topics.
 */
@Test
public void testRestore() throws Exception {
    final List<KafkaTopicPartition> partitions = new ArrayList<>(PARTITION_STATE.keySet());

    final DummyFlinkKafkaConsumer<String> consumerFunction =
        new DummyFlinkKafkaConsumer<>(TOPICS, partitions, FlinkKafkaConsumerBase.PARTITION_DISCOVERY_DISABLED);

    StreamSource<String, DummyFlinkKafkaConsumer<String>> consumerOperator =
        new StreamSource<>(consumerFunction);

    final AbstractStreamOperatorTestHarness<String> testHarness =
        new AbstractStreamOperatorTestHarness<>(consumerOperator, 1, 1, 0);

    testHarness.setTimeCharacteristic(TimeCharacteristic.ProcessingTime);
    testHarness.setup();

    // restore state from binary snapshot file
    testHarness.initializeState(
        OperatorSnapshotUtil.getResourceFilename(
            "kafka-consumer-migration-test-flink" + testMigrateVersion + "-snapshot"));
    testHarness.open();

    // assert that there are partitions and is identical to expected list
    assertTrue(consumerFunction.getSubscribedPartitionsToStartOffsets() != null);
    assertTrue(!consumerFunction.getSubscribedPartitionsToStartOffsets().isEmpty());

    // on restore, subscribedPartitionsToStartOffsets should be identical to the restored state
    assertEquals(PARTITION_STATE, consumerFunction.getSubscribedPartitionsToStartOffsets());

    // assert that state is correctly restored from legacy checkpoint
    assertTrue(consumerFunction.getRestoredState() != null);
    assertEquals(PARTITION_STATE, consumerFunction.getRestoredState());

    consumerOperator.close();
    consumerOperator.cancel();
}
Example 4
Source File: FlinkKafkaConsumerBaseMigrationTest.java From Flink-CEPplus with Apache License 2.0
/**
 * Test that restoring from savepoints taken before Flink 1.3 fails if partition discovery is enabled.
 */
@Test
public void testRestoreFailsWithNonEmptyPreFlink13StatesIfDiscoveryEnabled() throws Exception {
    assumeTrue(testMigrateVersion == MigrationVersion.v1_3 || testMigrateVersion == MigrationVersion.v1_2);

    final List<KafkaTopicPartition> partitions = new ArrayList<>(PARTITION_STATE.keySet());

    final DummyFlinkKafkaConsumer<String> consumerFunction =
        new DummyFlinkKafkaConsumer<>(TOPICS, partitions, 1000L); // discovery enabled

    StreamSource<String, DummyFlinkKafkaConsumer<String>> consumerOperator =
        new StreamSource<>(consumerFunction);

    final AbstractStreamOperatorTestHarness<String> testHarness =
        new AbstractStreamOperatorTestHarness<>(consumerOperator, 1, 1, 0);

    testHarness.setTimeCharacteristic(TimeCharacteristic.ProcessingTime);
    testHarness.setup();

    // restore state from binary snapshot file; should fail since discovery is enabled
    try {
        testHarness.initializeState(
            OperatorSnapshotUtil.getResourceFilename(
                "kafka-consumer-migration-test-flink" + testMigrateVersion + "-snapshot"));

        fail("Restore from savepoints from version before Flink 1.3.x should have failed if discovery is enabled.");
    } catch (Exception e) {
        Assert.assertTrue(e instanceof IllegalArgumentException);
    }
}
Example 5
Source File: FlinkKafkaConsumerBaseTest.java From Flink-CEPplus with Apache License 2.0
private static <T> AbstractStreamOperatorTestHarness<T> createTestHarness(
        SourceFunction<T> source, int numSubtasks, int subtaskIndex) throws Exception {

    AbstractStreamOperatorTestHarness<T> testHarness =
        new AbstractStreamOperatorTestHarness<>(
            new StreamSource<>(source), maxParallelism, numSubtasks, subtaskIndex);

    testHarness.setTimeCharacteristic(TimeCharacteristic.EventTime);

    return testHarness;
}
Example 6
Source File: FlinkKafkaConsumerBaseMigrationTest.java From flink with Apache License 2.0
/**
 * Test restoring from a legacy empty state, when no partitions could be found for topics.
 */
@Test
public void testRestoreFromEmptyStateNoPartitions() throws Exception {
    final DummyFlinkKafkaConsumer<String> consumerFunction =
        new DummyFlinkKafkaConsumer<>(
            Collections.singletonList("dummy-topic"),
            Collections.<KafkaTopicPartition>emptyList(),
            FlinkKafkaConsumerBase.PARTITION_DISCOVERY_DISABLED);

    StreamSource<String, DummyFlinkKafkaConsumer<String>> consumerOperator = new StreamSource<>(consumerFunction);

    final AbstractStreamOperatorTestHarness<String> testHarness =
        new AbstractStreamOperatorTestHarness<>(consumerOperator, 1, 1, 0);

    testHarness.setTimeCharacteristic(TimeCharacteristic.ProcessingTime);
    testHarness.setup();

    // restore state from binary snapshot file
    testHarness.initializeState(
        OperatorSnapshotUtil.getResourceFilename(
            "kafka-consumer-migration-test-flink" + testMigrateVersion + "-empty-state-snapshot"));
    testHarness.open();

    // assert that no partitions were found and the subscribed partition map is empty
    assertTrue(consumerFunction.getSubscribedPartitionsToStartOffsets() != null);
    assertTrue(consumerFunction.getSubscribedPartitionsToStartOffsets().isEmpty());

    // assert that no state was restored
    assertTrue(consumerFunction.getRestoredState().isEmpty());

    consumerOperator.close();
    consumerOperator.cancel();
}
Example 7
Source File: FlinkPulsarSourceTest.java From pulsar-flink with Apache License 2.0
private static <T> AbstractStreamOperatorTestHarness<T> createTestHarness(
        SourceFunction<T> source, int numSubtasks, int subtaskIndex) throws Exception {

    AbstractStreamOperatorTestHarness<T> testHarness =
        new AbstractStreamOperatorTestHarness<>(
            new StreamSource<>(source), maxParallelism, numSubtasks, subtaskIndex);

    testHarness.setTimeCharacteristic(TimeCharacteristic.EventTime);

    return testHarness;
}
Example 8
Source File: FlinkKafkaConsumerBaseMigrationTest.java From flink with Apache License 2.0
/**
 * Test restoring from a non-empty state taken using a previous Flink version, when some partitions could be
 * found for topics.
 */
@Test
public void testRestore() throws Exception {
    final List<KafkaTopicPartition> partitions = new ArrayList<>(PARTITION_STATE.keySet());

    final DummyFlinkKafkaConsumer<String> consumerFunction =
        new DummyFlinkKafkaConsumer<>(TOPICS, partitions, FlinkKafkaConsumerBase.PARTITION_DISCOVERY_DISABLED);

    StreamSource<String, DummyFlinkKafkaConsumer<String>> consumerOperator =
        new StreamSource<>(consumerFunction);

    final AbstractStreamOperatorTestHarness<String> testHarness =
        new AbstractStreamOperatorTestHarness<>(consumerOperator, 1, 1, 0);

    testHarness.setTimeCharacteristic(TimeCharacteristic.ProcessingTime);
    testHarness.setup();

    // restore state from binary snapshot file
    testHarness.initializeState(
        OperatorSnapshotUtil.getResourceFilename(
            "kafka-consumer-migration-test-flink" + testMigrateVersion + "-snapshot"));
    testHarness.open();

    // assert that there are partitions and is identical to expected list
    assertTrue(consumerFunction.getSubscribedPartitionsToStartOffsets() != null);
    assertTrue(!consumerFunction.getSubscribedPartitionsToStartOffsets().isEmpty());

    // on restore, subscribedPartitionsToStartOffsets should be identical to the restored state
    assertEquals(PARTITION_STATE, consumerFunction.getSubscribedPartitionsToStartOffsets());

    // assert that state is correctly restored from legacy checkpoint
    assertTrue(consumerFunction.getRestoredState() != null);
    assertEquals(PARTITION_STATE, consumerFunction.getRestoredState());

    consumerOperator.close();
    consumerOperator.cancel();
}
Example 9
Source File: FlinkKafkaConsumerBaseMigrationTest.java From flink with Apache License 2.0
/**
 * Test that restoring from savepoints taken before Flink 1.3 fails if partition discovery is enabled.
 */
@Test
public void testRestoreFailsWithNonEmptyPreFlink13StatesIfDiscoveryEnabled() throws Exception {
    assumeTrue(testMigrateVersion == MigrationVersion.v1_3 || testMigrateVersion == MigrationVersion.v1_2);

    final List<KafkaTopicPartition> partitions = new ArrayList<>(PARTITION_STATE.keySet());

    final DummyFlinkKafkaConsumer<String> consumerFunction =
        new DummyFlinkKafkaConsumer<>(TOPICS, partitions, 1000L); // discovery enabled

    StreamSource<String, DummyFlinkKafkaConsumer<String>> consumerOperator =
        new StreamSource<>(consumerFunction);

    final AbstractStreamOperatorTestHarness<String> testHarness =
        new AbstractStreamOperatorTestHarness<>(consumerOperator, 1, 1, 0);

    testHarness.setTimeCharacteristic(TimeCharacteristic.ProcessingTime);
    testHarness.setup();

    // restore state from binary snapshot file; should fail since discovery is enabled
    try {
        testHarness.initializeState(
            OperatorSnapshotUtil.getResourceFilename(
                "kafka-consumer-migration-test-flink" + testMigrateVersion + "-snapshot"));

        fail("Restore from savepoints from version before Flink 1.3.x should have failed if discovery is enabled.");
    } catch (Exception e) {
        Assert.assertTrue(e instanceof IllegalArgumentException);
    }
}
Example 10
Source File: FlinkKafkaConsumerBaseMigrationTest.java From flink with Apache License 2.0
/**
 * Test restoring from an empty state taken using a previous Flink version, when some partitions could be
 * found for topics.
 */
@Test
public void testRestoreFromEmptyStateWithPartitions() throws Exception {
    final List<KafkaTopicPartition> partitions = new ArrayList<>(PARTITION_STATE.keySet());

    final DummyFlinkKafkaConsumer<String> consumerFunction =
        new DummyFlinkKafkaConsumer<>(TOPICS, partitions, FlinkKafkaConsumerBase.PARTITION_DISCOVERY_DISABLED);

    StreamSource<String, DummyFlinkKafkaConsumer<String>> consumerOperator =
        new StreamSource<>(consumerFunction);

    final AbstractStreamOperatorTestHarness<String> testHarness =
        new AbstractStreamOperatorTestHarness<>(consumerOperator, 1, 1, 0);

    testHarness.setTimeCharacteristic(TimeCharacteristic.ProcessingTime);
    testHarness.setup();

    // restore state from binary snapshot file
    testHarness.initializeState(
        OperatorSnapshotUtil.getResourceFilename(
            "kafka-consumer-migration-test-flink" + testMigrateVersion + "-empty-state-snapshot"));
    testHarness.open();

    // the expected state in "kafka-consumer-migration-test-flink1.x-snapshot-empty-state";
    // all new partitions after the snapshot are considered as partitions that were created while the
    // consumer wasn't running, and should start from the earliest offset.
    final HashMap<KafkaTopicPartition, Long> expectedSubscribedPartitionsWithStartOffsets = new HashMap<>();
    for (KafkaTopicPartition partition : PARTITION_STATE.keySet()) {
        expectedSubscribedPartitionsWithStartOffsets.put(partition, KafkaTopicPartitionStateSentinel.EARLIEST_OFFSET);
    }

    // assert that there are partitions and is identical to expected list
    assertTrue(consumerFunction.getSubscribedPartitionsToStartOffsets() != null);
    assertTrue(!consumerFunction.getSubscribedPartitionsToStartOffsets().isEmpty());
    assertEquals(expectedSubscribedPartitionsWithStartOffsets, consumerFunction.getSubscribedPartitionsToStartOffsets());

    // the new partitions should have been considered as restored state
    assertTrue(consumerFunction.getRestoredState() != null);
    assertTrue(!consumerFunction.getSubscribedPartitionsToStartOffsets().isEmpty());
    for (Map.Entry<KafkaTopicPartition, Long> expectedEntry : expectedSubscribedPartitionsWithStartOffsets.entrySet()) {
        assertEquals(expectedEntry.getValue(), consumerFunction.getRestoredState().get(expectedEntry.getKey()));
    }

    consumerOperator.close();
    consumerOperator.cancel();
}
Example 11
Source File: FlinkKafkaConsumerBaseMigrationTest.java From flink with Apache License 2.0
private void writeSnapshot(String path, HashMap<KafkaTopicPartition, Long> state) throws Exception {
    final OneShotLatch latch = new OneShotLatch();
    final AbstractFetcher<String, ?> fetcher = mock(AbstractFetcher.class);

    doAnswer(new Answer<Void>() {
        @Override
        public Void answer(InvocationOnMock invocation) throws Throwable {
            latch.trigger();
            return null;
        }
    }).when(fetcher).runFetchLoop();

    when(fetcher.snapshotCurrentState()).thenReturn(state);

    final List<KafkaTopicPartition> partitions = new ArrayList<>(PARTITION_STATE.keySet());

    final DummyFlinkKafkaConsumer<String> consumerFunction =
        new DummyFlinkKafkaConsumer<>(fetcher, TOPICS, partitions, FlinkKafkaConsumerBase.PARTITION_DISCOVERY_DISABLED);

    StreamSource<String, DummyFlinkKafkaConsumer<String>> consumerOperator =
        new StreamSource<>(consumerFunction);

    final AbstractStreamOperatorTestHarness<String> testHarness =
        new AbstractStreamOperatorTestHarness<>(consumerOperator, 1, 1, 0);

    testHarness.setTimeCharacteristic(TimeCharacteristic.ProcessingTime);
    testHarness.setup();
    testHarness.open();

    final Throwable[] error = new Throwable[1];

    // run the source asynchronously
    Thread runner = new Thread() {
        @Override
        public void run() {
            try {
                consumerFunction.run(new DummySourceContext() {
                    @Override
                    public void collect(String element) {
                    }
                });
            } catch (Throwable t) {
                t.printStackTrace();
                error[0] = t;
            }
        }
    };
    runner.start();

    if (!latch.isTriggered()) {
        latch.await();
    }

    final OperatorSubtaskState snapshot;
    synchronized (testHarness.getCheckpointLock()) {
        snapshot = testHarness.snapshot(0L, 0L);
    }

    OperatorSnapshotUtil.writeStateHandle(snapshot, path);

    consumerOperator.close();
    runner.join();
}
Example 12
Source File: UnboundedSourceWrapperTest.java From beam with Apache License 2.0
@Test(timeout = 30_000)
public void testValueEmission() throws Exception {
  final int numElementsPerShard = 20;
  FlinkPipelineOptions options = PipelineOptionsFactory.as(FlinkPipelineOptions.class);

  final long[] numElementsReceived = {0L};
  final int[] numWatermarksReceived = {0};

  // this source will emit exactly NUM_ELEMENTS for each parallel reader,
  // afterwards it will stall. We check whether we also receive NUM_ELEMENTS
  // elements later.
  TestCountingSource source =
      new TestCountingSource(numElementsPerShard).withFixedNumSplits(numSplits);

  for (int subtaskIndex = 0; subtaskIndex < numTasks; subtaskIndex++) {
    UnboundedSourceWrapper<KV<Integer, Integer>, TestCountingSource.CounterMark> flinkWrapper =
        new UnboundedSourceWrapper<>("stepName", options, source, numTasks);

    // the source wrapper will only request as many splits as there are tasks and the source
    // will create at most numSplits splits
    assertEquals(numSplits, flinkWrapper.getSplitSources().size());

    StreamSource<
            WindowedValue<ValueWithRecordId<KV<Integer, Integer>>>,
            UnboundedSourceWrapper<KV<Integer, Integer>, TestCountingSource.CounterMark>>
        sourceOperator = new StreamSource<>(flinkWrapper);

    AbstractStreamOperatorTestHarness<WindowedValue<ValueWithRecordId<KV<Integer, Integer>>>>
        testHarness =
            new AbstractStreamOperatorTestHarness<>(
                sourceOperator,
                numTasks /* max parallelism */,
                numTasks /* parallelism */,
                subtaskIndex /* subtask index */);

    // The testing timer service is synchronous, so we must configure a watermark interval
    // > 0, otherwise we can loop infinitely due to a timer always becoming ready after
    // it has been set.
    testHarness.getExecutionConfig().setAutoWatermarkInterval(10L);
    testHarness.setProcessingTime(System.currentTimeMillis());
    testHarness.setTimeCharacteristic(TimeCharacteristic.EventTime);

    Thread processingTimeUpdateThread = startProcessingTimeUpdateThread(testHarness);

    try {
      testHarness.open();
      StreamSources.run(
          sourceOperator,
          testHarness.getCheckpointLock(),
          new TestStreamStatusMaintainer(),
          new Output<StreamRecord<WindowedValue<ValueWithRecordId<KV<Integer, Integer>>>>>() {

            private boolean hasSeenMaxWatermark = false;

            @Override
            public void emitWatermark(Watermark watermark) {
              // we get this when there is no more data
              // it can happen that we get the max watermark several times, so guard against
              // this
              if (!hasSeenMaxWatermark
                  && watermark.getTimestamp() >= BoundedWindow.TIMESTAMP_MAX_VALUE.getMillis()) {
                numWatermarksReceived[0]++;
                hasSeenMaxWatermark = true;
              }
            }

            @Override
            public <X> void collect(OutputTag<X> outputTag, StreamRecord<X> streamRecord) {
              collect((StreamRecord) streamRecord);
            }

            @Override
            public void emitLatencyMarker(LatencyMarker latencyMarker) {}

            @Override
            public void collect(
                StreamRecord<WindowedValue<ValueWithRecordId<KV<Integer, Integer>>>>
                    windowedValueStreamRecord) {
              numElementsReceived[0]++;
            }

            @Override
            public void close() {}
          });
    } finally {
      processingTimeUpdateThread.interrupt();
      processingTimeUpdateThread.join();
    }
  }

  // verify that we get the expected count across all subtasks
  assertEquals(numElementsPerShard * numSplits, numElementsReceived[0]);
  // and that we get as many final watermarks as there are subtasks
  assertEquals(numTasks, numWatermarksReceived[0]);
}
Example 13
Source File: FlinkKinesisConsumerMigrationTest.java From flink with Apache License 2.0
@SuppressWarnings("unchecked") private void writeSnapshot(String path, HashMap<StreamShardMetadata, SequenceNumber> state) throws Exception { final List<StreamShardHandle> initialDiscoveryShards = new ArrayList<>(state.size()); for (StreamShardMetadata shardMetadata : state.keySet()) { Shard shard = new Shard(); shard.setShardId(shardMetadata.getShardId()); SequenceNumberRange sequenceNumberRange = new SequenceNumberRange(); sequenceNumberRange.withStartingSequenceNumber("1"); shard.setSequenceNumberRange(sequenceNumberRange); initialDiscoveryShards.add(new StreamShardHandle(shardMetadata.getStreamName(), shard)); } final TestFetcher<String> fetcher = new TestFetcher<>( Collections.singletonList(TEST_STREAM_NAME), new TestSourceContext<>(), new TestRuntimeContext(true, 1, 0), TestUtils.getStandardProperties(), new KinesisDeserializationSchemaWrapper<>(new SimpleStringSchema()), state, initialDiscoveryShards); final DummyFlinkKinesisConsumer<String> consumer = new DummyFlinkKinesisConsumer<>( fetcher, new KinesisDeserializationSchemaWrapper<>(new SimpleStringSchema())); StreamSource<String, DummyFlinkKinesisConsumer<String>> consumerOperator = new StreamSource<>(consumer); final AbstractStreamOperatorTestHarness<String> testHarness = new AbstractStreamOperatorTestHarness<>(consumerOperator, 1, 1, 0); testHarness.setTimeCharacteristic(TimeCharacteristic.ProcessingTime); testHarness.setup(); testHarness.open(); final AtomicReference<Throwable> error = new AtomicReference<>(); // run the source asynchronously Thread runner = new Thread() { @Override public void run() { try { consumer.run(new TestSourceContext<>()); } catch (Throwable t) { t.printStackTrace(); error.set(t); } } }; runner.start(); fetcher.waitUntilRun(); final OperatorSubtaskState snapshot; synchronized (testHarness.getCheckpointLock()) { snapshot = testHarness.snapshot(0L, 0L); } OperatorSnapshotUtil.writeStateHandle(snapshot, path); consumerOperator.close(); runner.join(); }
Example 14
Source File: FlinkKafkaConsumerBaseMigrationTest.java From flink with Apache License 2.0
/**
 * Test restoring from an empty state taken using a previous Flink version, when some partitions could be
 * found for topics.
 */
@Test
public void testRestoreFromEmptyStateWithPartitions() throws Exception {
    final List<KafkaTopicPartition> partitions = new ArrayList<>(PARTITION_STATE.keySet());

    final DummyFlinkKafkaConsumer<String> consumerFunction =
        new DummyFlinkKafkaConsumer<>(TOPICS, partitions, FlinkKafkaConsumerBase.PARTITION_DISCOVERY_DISABLED);

    StreamSource<String, DummyFlinkKafkaConsumer<String>> consumerOperator =
        new StreamSource<>(consumerFunction);

    final AbstractStreamOperatorTestHarness<String> testHarness =
        new AbstractStreamOperatorTestHarness<>(consumerOperator, 1, 1, 0);

    testHarness.setTimeCharacteristic(TimeCharacteristic.ProcessingTime);
    testHarness.setup();

    // restore state from binary snapshot file
    testHarness.initializeState(
        OperatorSnapshotUtil.getResourceFilename(
            "kafka-consumer-migration-test-flink" + testMigrateVersion + "-empty-state-snapshot"));
    testHarness.open();

    // the expected state in "kafka-consumer-migration-test-flink1.2-snapshot-empty-state";
    // all new partitions after the snapshot are considered as partitions that were created while the
    // consumer wasn't running, and should start from the earliest offset.
    final HashMap<KafkaTopicPartition, Long> expectedSubscribedPartitionsWithStartOffsets = new HashMap<>();
    for (KafkaTopicPartition partition : PARTITION_STATE.keySet()) {
        expectedSubscribedPartitionsWithStartOffsets.put(partition, KafkaTopicPartitionStateSentinel.EARLIEST_OFFSET);
    }

    // assert that there are partitions and is identical to expected list
    assertTrue(consumerFunction.getSubscribedPartitionsToStartOffsets() != null);
    assertTrue(!consumerFunction.getSubscribedPartitionsToStartOffsets().isEmpty());
    assertEquals(expectedSubscribedPartitionsWithStartOffsets, consumerFunction.getSubscribedPartitionsToStartOffsets());

    // the new partitions should have been considered as restored state
    assertTrue(consumerFunction.getRestoredState() != null);
    assertTrue(!consumerFunction.getSubscribedPartitionsToStartOffsets().isEmpty());
    for (Map.Entry<KafkaTopicPartition, Long> expectedEntry : expectedSubscribedPartitionsWithStartOffsets.entrySet()) {
        assertEquals(expectedEntry.getValue(), consumerFunction.getRestoredState().get(expectedEntry.getKey()));
    }

    consumerOperator.close();
    consumerOperator.cancel();
}
Example 15
Source File: FlinkKafkaConsumerBaseMigrationTest.java From flink with Apache License 2.0
private void writeSnapshot(String path, HashMap<KafkaTopicPartition, Long> state) throws Exception {
    final OneShotLatch latch = new OneShotLatch();
    final AbstractFetcher<String, ?> fetcher = mock(AbstractFetcher.class);

    doAnswer(new Answer<Void>() {
        @Override
        public Void answer(InvocationOnMock invocation) throws Throwable {
            latch.trigger();
            return null;
        }
    }).when(fetcher).runFetchLoop();

    when(fetcher.snapshotCurrentState()).thenReturn(state);

    final List<KafkaTopicPartition> partitions = new ArrayList<>(PARTITION_STATE.keySet());

    final DummyFlinkKafkaConsumer<String> consumerFunction =
        new DummyFlinkKafkaConsumer<>(fetcher, TOPICS, partitions, FlinkKafkaConsumerBase.PARTITION_DISCOVERY_DISABLED);

    StreamSource<String, DummyFlinkKafkaConsumer<String>> consumerOperator =
        new StreamSource<>(consumerFunction);

    final AbstractStreamOperatorTestHarness<String> testHarness =
        new AbstractStreamOperatorTestHarness<>(consumerOperator, 1, 1, 0);

    testHarness.setTimeCharacteristic(TimeCharacteristic.ProcessingTime);
    testHarness.setup();
    testHarness.open();

    final Throwable[] error = new Throwable[1];

    // run the source asynchronously
    Thread runner = new Thread() {
        @Override
        public void run() {
            try {
                consumerFunction.run(new DummySourceContext() {
                    @Override
                    public void collect(String element) {
                    }
                });
            } catch (Throwable t) {
                t.printStackTrace();
                error[0] = t;
            }
        }
    };
    runner.start();

    if (!latch.isTriggered()) {
        latch.await();
    }

    final OperatorSubtaskState snapshot;
    synchronized (testHarness.getCheckpointLock()) {
        snapshot = testHarness.snapshot(0L, 0L);
    }

    OperatorSnapshotUtil.writeStateHandle(snapshot, path);

    consumerOperator.close();
    runner.join();
}
Example 16
Source File: FlinkKinesisConsumerMigrationTest.java From Flink-CEPplus with Apache License 2.0
@SuppressWarnings("unchecked") private void writeSnapshot(String path, HashMap<StreamShardMetadata, SequenceNumber> state) throws Exception { final TestFetcher<String> fetcher = new TestFetcher<>( Collections.singletonList(TEST_STREAM_NAME), new TestSourceContext<>(), new TestRuntimeContext(true, 1, 0), new Properties(), new KinesisDeserializationSchemaWrapper<>(new SimpleStringSchema()), state, null); final DummyFlinkKinesisConsumer<String> consumer = new DummyFlinkKinesisConsumer<>( fetcher, new KinesisDeserializationSchemaWrapper<>(new SimpleStringSchema())); StreamSource<String, DummyFlinkKinesisConsumer<String>> consumerOperator = new StreamSource<>(consumer); final AbstractStreamOperatorTestHarness<String> testHarness = new AbstractStreamOperatorTestHarness<>(consumerOperator, 1, 1, 0); testHarness.setTimeCharacteristic(TimeCharacteristic.ProcessingTime); testHarness.setup(); testHarness.open(); final AtomicReference<Throwable> error = new AtomicReference<>(); // run the source asynchronously Thread runner = new Thread() { @Override public void run() { try { consumer.run(new TestSourceContext<>()); } catch (Throwable t) { t.printStackTrace(); error.set(t); } } }; runner.start(); fetcher.waitUntilRun(); final OperatorSubtaskState snapshot; synchronized (testHarness.getCheckpointLock()) { snapshot = testHarness.snapshot(0L, 0L); } OperatorSnapshotUtil.writeStateHandle(snapshot, path); consumerOperator.close(); runner.join(); }
Example 17
Source File: FlinkKafkaConsumerBaseMigrationTest.java From Flink-CEPplus with Apache License 2.0
/**
 * Test restoring from an empty state taken using a previous Flink version, when some partitions could be
 * found for topics.
 */
@Test
public void testRestoreFromEmptyStateWithPartitions() throws Exception {
    final List<KafkaTopicPartition> partitions = new ArrayList<>(PARTITION_STATE.keySet());

    final DummyFlinkKafkaConsumer<String> consumerFunction =
        new DummyFlinkKafkaConsumer<>(TOPICS, partitions, FlinkKafkaConsumerBase.PARTITION_DISCOVERY_DISABLED);

    StreamSource<String, DummyFlinkKafkaConsumer<String>> consumerOperator =
        new StreamSource<>(consumerFunction);

    final AbstractStreamOperatorTestHarness<String> testHarness =
        new AbstractStreamOperatorTestHarness<>(consumerOperator, 1, 1, 0);

    testHarness.setTimeCharacteristic(TimeCharacteristic.ProcessingTime);
    testHarness.setup();

    // restore state from binary snapshot file
    testHarness.initializeState(
        OperatorSnapshotUtil.getResourceFilename(
            "kafka-consumer-migration-test-flink" + testMigrateVersion + "-empty-state-snapshot"));
    testHarness.open();

    // the expected state in "kafka-consumer-migration-test-flink1.2-snapshot-empty-state";
    // all new partitions after the snapshot are considered as partitions that were created while the
    // consumer wasn't running, and should start from the earliest offset.
    final HashMap<KafkaTopicPartition, Long> expectedSubscribedPartitionsWithStartOffsets = new HashMap<>();
    for (KafkaTopicPartition partition : PARTITION_STATE.keySet()) {
        expectedSubscribedPartitionsWithStartOffsets.put(partition, KafkaTopicPartitionStateSentinel.EARLIEST_OFFSET);
    }

    // assert that there are partitions and is identical to expected list
    assertTrue(consumerFunction.getSubscribedPartitionsToStartOffsets() != null);
    assertTrue(!consumerFunction.getSubscribedPartitionsToStartOffsets().isEmpty());
    assertEquals(expectedSubscribedPartitionsWithStartOffsets, consumerFunction.getSubscribedPartitionsToStartOffsets());

    // the new partitions should have been considered as restored state
    assertTrue(consumerFunction.getRestoredState() != null);
    assertTrue(!consumerFunction.getSubscribedPartitionsToStartOffsets().isEmpty());
    for (Map.Entry<KafkaTopicPartition, Long> expectedEntry : expectedSubscribedPartitionsWithStartOffsets.entrySet()) {
        assertEquals(expectedEntry.getValue(), consumerFunction.getRestoredState().get(expectedEntry.getKey()));
    }

    consumerOperator.close();
    consumerOperator.cancel();
}