com.amazonaws.services.kinesis.model.SequenceNumberRange Java Examples
The following examples show how to use
com.amazonaws.services.kinesis.model.SequenceNumberRange.
You can vote up the ones you like or vote down the ones you don't like,
You can also navigate to the original project or source file via the links above each example, and review related API usage in the sidebar.
Example #1
Source File: KinesisDataFetcher.java From Flink-CEPplus with Apache License 2.0 | 6 votes |
/**
 * Converts a Flink-side {@link StreamShardMetadata} into an equivalent {@link StreamShardHandle}.
 *
 * @param streamShardMetadata the {@link StreamShardMetadata} to be converted
 * @return a {@link StreamShardHandle} wrapping a freshly built AWS SDK {@link Shard}
 */
public static StreamShardHandle convertToStreamShardHandle(StreamShardMetadata streamShardMetadata) {
	// Rebuild the AWS SDK Shard field-by-field using the fluent with* setters
	// (each returns the mutated receiver, so the chain is equivalent to separate setter calls).
	Shard shard = new Shard()
		.withShardId(streamShardMetadata.getShardId())
		.withParentShardId(streamShardMetadata.getParentShardId())
		.withAdjacentParentShardId(streamShardMetadata.getAdjacentParentShardId())
		.withHashKeyRange(new HashKeyRange()
			.withStartingHashKey(streamShardMetadata.getStartingHashKey())
			.withEndingHashKey(streamShardMetadata.getEndingHashKey()))
		.withSequenceNumberRange(new SequenceNumberRange()
			.withStartingSequenceNumber(streamShardMetadata.getStartingSequenceNumber())
			.withEndingSequenceNumber(streamShardMetadata.getEndingSequenceNumber()));
	return new StreamShardHandle(streamShardMetadata.getStreamName(), shard);
}
Example #2
Source File: FakeKinesisBehavioursFactory.java From flink with Apache License 2.0 | 6 votes |
/**
 * Builds one {@link StreamShardHandle} per supplied queue for every stream, registering each
 * handle's shard iterator against its backing queue and recording the per-stream shard list.
 *
 * @param streamsToShardCount map from stream name to one blocking queue per shard
 */
public BlockingQueueKinesis(Map<String, List<BlockingQueue<String>>> streamsToShardCount) {
	for (Map.Entry<String, List<BlockingQueue<String>>> entry : streamsToShardCount.entrySet()) {
		final String streamName = entry.getKey();
		final List<BlockingQueue<String>> shardQueues = entry.getValue();
		final int shardCount = shardQueues.size();
		if (shardCount == 0) {
			// a stream with no queues contributes no shards
			continue;
		}
		List<StreamShardHandle> shardsOfStream = new ArrayList<>(shardCount);
		for (int i = 0; i < shardCount; i++) {
			// Fixed "0" ranges: only the shard id matters for these fake shards.
			StreamShardHandle shardHandle = new StreamShardHandle(
				streamName,
				new Shard()
					.withShardId(KinesisShardIdGenerator.generateFromShardOrder(i))
					.withSequenceNumberRange(new SequenceNumberRange().withStartingSequenceNumber("0"))
					.withHashKeyRange(new HashKeyRange().withStartingHashKey("0").withEndingHashKey("0")));
			shardsOfStream.add(shardHandle);
			shardIteratorToQueueMap.put(getShardIterator(shardHandle), shardQueues.get(i));
		}
		streamsWithListOfShards.put(streamName, shardsOfStream);
	}
}
Example #3
Source File: KinesisDataFetcher.java From flink with Apache License 2.0 | 6 votes |
/**
 * Converts a Flink-side {@link StreamShardMetadata} into an equivalent {@link StreamShardHandle}.
 *
 * @param streamShardMetadata the {@link StreamShardMetadata} to be converted
 * @return a {@link StreamShardHandle} wrapping a freshly built AWS SDK {@link Shard}
 */
public static StreamShardHandle convertToStreamShardHandle(StreamShardMetadata streamShardMetadata) {
	// Rebuild the AWS SDK Shard field-by-field using the fluent with* setters
	// (each returns the mutated receiver, so the chain is equivalent to separate setter calls).
	Shard shard = new Shard()
		.withShardId(streamShardMetadata.getShardId())
		.withParentShardId(streamShardMetadata.getParentShardId())
		.withAdjacentParentShardId(streamShardMetadata.getAdjacentParentShardId())
		.withHashKeyRange(new HashKeyRange()
			.withStartingHashKey(streamShardMetadata.getStartingHashKey())
			.withEndingHashKey(streamShardMetadata.getEndingHashKey()))
		.withSequenceNumberRange(new SequenceNumberRange()
			.withStartingSequenceNumber(streamShardMetadata.getStartingSequenceNumber())
			.withEndingSequenceNumber(streamShardMetadata.getEndingSequenceNumber()));
	return new StreamShardHandle(streamShardMetadata.getStreamName(), shard);
}
Example #4
Source File: FakeKinesisBehavioursFactory.java From flink with Apache License 2.0 | 6 votes |
/**
 * Builds one {@link StreamShardHandle} per supplied queue for every stream, registering each
 * handle's shard iterator against its backing queue and recording the per-stream shard list.
 *
 * @param streamsToShardCount map from stream name to one blocking queue per shard
 */
public BlockingQueueKinesis(Map<String, List<BlockingQueue<String>>> streamsToShardCount) {
	for (Map.Entry<String, List<BlockingQueue<String>>> entry : streamsToShardCount.entrySet()) {
		final String streamName = entry.getKey();
		final List<BlockingQueue<String>> shardQueues = entry.getValue();
		final int shardCount = shardQueues.size();
		if (shardCount == 0) {
			// a stream with no queues contributes no shards
			continue;
		}
		List<StreamShardHandle> shardsOfStream = new ArrayList<>(shardCount);
		for (int i = 0; i < shardCount; i++) {
			// Fixed "0" ranges: only the shard id matters for these fake shards.
			StreamShardHandle shardHandle = new StreamShardHandle(
				streamName,
				new Shard()
					.withShardId(KinesisShardIdGenerator.generateFromShardOrder(i))
					.withSequenceNumberRange(new SequenceNumberRange().withStartingSequenceNumber("0"))
					.withHashKeyRange(new HashKeyRange().withStartingHashKey("0").withEndingHashKey("0")));
			shardsOfStream.add(shardHandle);
			shardIteratorToQueueMap.put(getShardIterator(shardHandle), shardQueues.get(i));
		}
		streamsWithListOfShards.put(streamName, shardsOfStream);
	}
}
Example #5
Source File: KinesisDataFetcher.java From flink with Apache License 2.0 | 6 votes |
/**
 * Converts a Flink-side {@link StreamShardMetadata} into an equivalent {@link StreamShardHandle}.
 *
 * @param streamShardMetadata the {@link StreamShardMetadata} to be converted
 * @return a {@link StreamShardHandle} wrapping a freshly built AWS SDK {@link Shard}
 */
public static StreamShardHandle convertToStreamShardHandle(StreamShardMetadata streamShardMetadata) {
	// Rebuild the AWS SDK Shard field-by-field using the fluent with* setters
	// (each returns the mutated receiver, so the chain is equivalent to separate setter calls).
	Shard shard = new Shard()
		.withShardId(streamShardMetadata.getShardId())
		.withParentShardId(streamShardMetadata.getParentShardId())
		.withAdjacentParentShardId(streamShardMetadata.getAdjacentParentShardId())
		.withHashKeyRange(new HashKeyRange()
			.withStartingHashKey(streamShardMetadata.getStartingHashKey())
			.withEndingHashKey(streamShardMetadata.getEndingHashKey()))
		.withSequenceNumberRange(new SequenceNumberRange()
			.withStartingSequenceNumber(streamShardMetadata.getStartingSequenceNumber())
			.withEndingSequenceNumber(streamShardMetadata.getEndingSequenceNumber()));
	return new StreamShardHandle(streamShardMetadata.getStreamName(), shard);
}
Example #6
Source File: FakeKinesisBehavioursFactory.java From Flink-CEPplus with Apache License 2.0 | 6 votes |
/**
 * Builds one {@link StreamShardHandle} per supplied queue for every stream, registering each
 * handle's shard iterator against its backing queue and recording the per-stream shard list.
 *
 * @param streamsToShardCount map from stream name to one blocking queue per shard
 */
public BlockingQueueKinesis(Map<String, List<BlockingQueue<String>>> streamsToShardCount) {
	for (Map.Entry<String, List<BlockingQueue<String>>> entry : streamsToShardCount.entrySet()) {
		final String streamName = entry.getKey();
		final List<BlockingQueue<String>> shardQueues = entry.getValue();
		final int shardCount = shardQueues.size();
		if (shardCount == 0) {
			// a stream with no queues contributes no shards
			continue;
		}
		List<StreamShardHandle> shardsOfStream = new ArrayList<>(shardCount);
		for (int i = 0; i < shardCount; i++) {
			// Fixed "0" ranges: only the shard id matters for these fake shards.
			StreamShardHandle shardHandle = new StreamShardHandle(
				streamName,
				new Shard()
					.withShardId(KinesisShardIdGenerator.generateFromShardOrder(i))
					.withSequenceNumberRange(new SequenceNumberRange().withStartingSequenceNumber("0"))
					.withHashKeyRange(new HashKeyRange().withStartingHashKey("0").withEndingHashKey("0")));
			shardsOfStream.add(shardHandle);
			shardIteratorToQueueMap.put(getShardIterator(shardHandle), shardQueues.get(i));
		}
		streamsWithListOfShards.put(streamName, shardsOfStream);
	}
}
Example #7
Source File: MockKinesisClient.java From presto with Apache License 2.0 | 5 votes |
public InternalStream(String streamName, int shardCount, boolean isActive) { this.streamName = streamName; this.streamAmazonResourceName = "local:fake.stream:" + streamName; if (isActive) { this.streamStatus = "ACTIVE"; } for (int i = 0; i < shardCount; i++) { InternalShard newShard = new InternalShard(this.streamName, i); newShard.setSequenceNumberRange((new SequenceNumberRange()).withStartingSequenceNumber("100").withEndingSequenceNumber("999")); this.shards.add(newShard); } }
Example #8
Source File: FlinkKinesisConsumerTest.java From flink with Apache License 2.0 | 5 votes |
/**
 * Verifies that converting a legacy {@link KinesisStreamShard} yields a
 * {@link StreamShardMetadata} with every field mapped across.
 */
@Test
public void testLegacyKinesisStreamShardToStreamShardMetadataConversion() {
	final String streamName = "fakeStream1";
	final String shardId = "shard-000001";
	final String parentShardId = "shard-000002";
	final String adjacentParentShardId = "shard-000003";
	final String startingHashKey = "key-000001";
	final String endingHashKey = "key-000010";
	final String startingSequenceNumber = "seq-0000021";
	final String endingSequenceNumber = "seq-00000031";

	// Expected result: metadata with every field populated.
	StreamShardMetadata expectedMetadata = new StreamShardMetadata();
	expectedMetadata.setStreamName(streamName);
	expectedMetadata.setShardId(shardId);
	expectedMetadata.setParentShardId(parentShardId);
	expectedMetadata.setAdjacentParentShardId(adjacentParentShardId);
	expectedMetadata.setStartingHashKey(startingHashKey);
	expectedMetadata.setEndingHashKey(endingHashKey);
	expectedMetadata.setStartingSequenceNumber(startingSequenceNumber);
	expectedMetadata.setEndingSequenceNumber(endingSequenceNumber);

	// Input: the equivalent AWS SDK shard, ranges built separately for readability.
	HashKeyRange hashKeys = new HashKeyRange()
		.withStartingHashKey(startingHashKey)
		.withEndingHashKey(endingHashKey);
	SequenceNumberRange sequenceNumbers = new SequenceNumberRange()
		.withStartingSequenceNumber(startingSequenceNumber)
		.withEndingSequenceNumber(endingSequenceNumber);
	Shard shard = new Shard()
		.withShardId(shardId)
		.withParentShardId(parentShardId)
		.withAdjacentParentShardId(adjacentParentShardId)
		.withHashKeyRange(hashKeys)
		.withSequenceNumberRange(sequenceNumbers);

	KinesisStreamShard kinesisStreamShard = new KinesisStreamShard(streamName, shard);
	assertEquals(expectedMetadata, KinesisStreamShard.convertToStreamShardMetadata(kinesisStreamShard));
}
Example #9
Source File: KinesisDataFetcherTest.java From flink with Apache License 2.0 | 5 votes |
/**
 * Round-trip check: {@link KinesisDataFetcher}'s metadata/handle conversions must be exact
 * inverses of each other when every field is populated.
 */
@Test
public void testStreamShardMetadataAndHandleConversion() {
	final String streamName = "fakeStream1";
	final String shardId = "shard-000001";
	final String parentShardId = "shard-000002";
	final String adjacentParentShardId = "shard-000003";
	final String startingHashKey = "key-000001";
	final String endingHashKey = "key-000010";
	final String startingSequenceNumber = "seq-0000021";
	final String endingSequenceNumber = "seq-00000031";

	// Flink-side metadata with every field populated.
	StreamShardMetadata metadata = new StreamShardMetadata();
	metadata.setStreamName(streamName);
	metadata.setShardId(shardId);
	metadata.setParentShardId(parentShardId);
	metadata.setAdjacentParentShardId(adjacentParentShardId);
	metadata.setStartingHashKey(startingHashKey);
	metadata.setEndingHashKey(endingHashKey);
	metadata.setStartingSequenceNumber(startingSequenceNumber);
	metadata.setEndingSequenceNumber(endingSequenceNumber);

	// The equivalent AWS SDK shard, ranges built separately for readability.
	HashKeyRange hashKeys = new HashKeyRange()
		.withStartingHashKey(startingHashKey)
		.withEndingHashKey(endingHashKey);
	SequenceNumberRange sequenceNumbers = new SequenceNumberRange()
		.withStartingSequenceNumber(startingSequenceNumber)
		.withEndingSequenceNumber(endingSequenceNumber);
	Shard shard = new Shard()
		.withShardId(shardId)
		.withParentShardId(parentShardId)
		.withAdjacentParentShardId(adjacentParentShardId)
		.withHashKeyRange(hashKeys)
		.withSequenceNumberRange(sequenceNumbers);
	StreamShardHandle handle = new StreamShardHandle(streamName, shard);

	// Both directions must reproduce the counterpart exactly.
	assertEquals(metadata, KinesisDataFetcher.convertToStreamShardMetadata(handle));
	assertEquals(handle, KinesisDataFetcher.convertToStreamShardHandle(metadata));
}
Example #10
Source File: FlinkKinesisConsumerTest.java From flink with Apache License 2.0 | 5 votes |
/**
 * Verifies that converting a legacy {@link KinesisStreamShard} yields a
 * {@link StreamShardMetadata} with every field mapped across.
 */
@Test
public void testLegacyKinesisStreamShardToStreamShardMetadataConversion() {
	final String streamName = "fakeStream1";
	final String shardId = "shard-000001";
	final String parentShardId = "shard-000002";
	final String adjacentParentShardId = "shard-000003";
	final String startingHashKey = "key-000001";
	final String endingHashKey = "key-000010";
	final String startingSequenceNumber = "seq-0000021";
	final String endingSequenceNumber = "seq-00000031";

	// Expected result: metadata with every field populated.
	StreamShardMetadata expectedMetadata = new StreamShardMetadata();
	expectedMetadata.setStreamName(streamName);
	expectedMetadata.setShardId(shardId);
	expectedMetadata.setParentShardId(parentShardId);
	expectedMetadata.setAdjacentParentShardId(adjacentParentShardId);
	expectedMetadata.setStartingHashKey(startingHashKey);
	expectedMetadata.setEndingHashKey(endingHashKey);
	expectedMetadata.setStartingSequenceNumber(startingSequenceNumber);
	expectedMetadata.setEndingSequenceNumber(endingSequenceNumber);

	// Input: the equivalent AWS SDK shard, ranges built separately for readability.
	HashKeyRange hashKeys = new HashKeyRange()
		.withStartingHashKey(startingHashKey)
		.withEndingHashKey(endingHashKey);
	SequenceNumberRange sequenceNumbers = new SequenceNumberRange()
		.withStartingSequenceNumber(startingSequenceNumber)
		.withEndingSequenceNumber(endingSequenceNumber);
	Shard shard = new Shard()
		.withShardId(shardId)
		.withParentShardId(parentShardId)
		.withAdjacentParentShardId(adjacentParentShardId)
		.withHashKeyRange(hashKeys)
		.withSequenceNumberRange(sequenceNumbers);

	KinesisStreamShard kinesisStreamShard = new KinesisStreamShard(streamName, shard);
	assertEquals(expectedMetadata, KinesisStreamShard.convertToStreamShardMetadata(kinesisStreamShard));
}
Example #11
Source File: KinesisDataFetcherTest.java From flink with Apache License 2.0 | 5 votes |
/**
 * Round-trip check: {@link KinesisDataFetcher}'s metadata/handle conversions must be exact
 * inverses of each other when every field is populated.
 */
@Test
public void testStreamShardMetadataAndHandleConversion() {
	final String streamName = "fakeStream1";
	final String shardId = "shard-000001";
	final String parentShardId = "shard-000002";
	final String adjacentParentShardId = "shard-000003";
	final String startingHashKey = "key-000001";
	final String endingHashKey = "key-000010";
	final String startingSequenceNumber = "seq-0000021";
	final String endingSequenceNumber = "seq-00000031";

	// Flink-side metadata with every field populated.
	StreamShardMetadata metadata = new StreamShardMetadata();
	metadata.setStreamName(streamName);
	metadata.setShardId(shardId);
	metadata.setParentShardId(parentShardId);
	metadata.setAdjacentParentShardId(adjacentParentShardId);
	metadata.setStartingHashKey(startingHashKey);
	metadata.setEndingHashKey(endingHashKey);
	metadata.setStartingSequenceNumber(startingSequenceNumber);
	metadata.setEndingSequenceNumber(endingSequenceNumber);

	// The equivalent AWS SDK shard, ranges built separately for readability.
	HashKeyRange hashKeys = new HashKeyRange()
		.withStartingHashKey(startingHashKey)
		.withEndingHashKey(endingHashKey);
	SequenceNumberRange sequenceNumbers = new SequenceNumberRange()
		.withStartingSequenceNumber(startingSequenceNumber)
		.withEndingSequenceNumber(endingSequenceNumber);
	Shard shard = new Shard()
		.withShardId(shardId)
		.withParentShardId(parentShardId)
		.withAdjacentParentShardId(adjacentParentShardId)
		.withHashKeyRange(hashKeys)
		.withSequenceNumberRange(sequenceNumbers);
	StreamShardHandle handle = new StreamShardHandle(streamName, shard);

	// Both directions must reproduce the counterpart exactly.
	assertEquals(metadata, KinesisDataFetcher.convertToStreamShardMetadata(handle));
	assertEquals(handle, KinesisDataFetcher.convertToStreamShardHandle(metadata));
}
Example #12
Source File: FlinkKinesisConsumerTest.java From Flink-CEPplus with Apache License 2.0 | 5 votes |
/**
 * Verifies that converting a legacy {@link KinesisStreamShard} yields a
 * {@link StreamShardMetadata} with every field mapped across.
 */
@Test
public void testLegacyKinesisStreamShardToStreamShardMetadataConversion() {
	final String streamName = "fakeStream1";
	final String shardId = "shard-000001";
	final String parentShardId = "shard-000002";
	final String adjacentParentShardId = "shard-000003";
	final String startingHashKey = "key-000001";
	final String endingHashKey = "key-000010";
	final String startingSequenceNumber = "seq-0000021";
	final String endingSequenceNumber = "seq-00000031";

	// Expected result: metadata with every field populated.
	StreamShardMetadata expectedMetadata = new StreamShardMetadata();
	expectedMetadata.setStreamName(streamName);
	expectedMetadata.setShardId(shardId);
	expectedMetadata.setParentShardId(parentShardId);
	expectedMetadata.setAdjacentParentShardId(adjacentParentShardId);
	expectedMetadata.setStartingHashKey(startingHashKey);
	expectedMetadata.setEndingHashKey(endingHashKey);
	expectedMetadata.setStartingSequenceNumber(startingSequenceNumber);
	expectedMetadata.setEndingSequenceNumber(endingSequenceNumber);

	// Input: the equivalent AWS SDK shard, ranges built separately for readability.
	HashKeyRange hashKeys = new HashKeyRange()
		.withStartingHashKey(startingHashKey)
		.withEndingHashKey(endingHashKey);
	SequenceNumberRange sequenceNumbers = new SequenceNumberRange()
		.withStartingSequenceNumber(startingSequenceNumber)
		.withEndingSequenceNumber(endingSequenceNumber);
	Shard shard = new Shard()
		.withShardId(shardId)
		.withParentShardId(parentShardId)
		.withAdjacentParentShardId(adjacentParentShardId)
		.withHashKeyRange(hashKeys)
		.withSequenceNumberRange(sequenceNumbers);

	KinesisStreamShard kinesisStreamShard = new KinesisStreamShard(streamName, shard);
	assertEquals(expectedMetadata, KinesisStreamShard.convertToStreamShardMetadata(kinesisStreamShard));
}
Example #13
Source File: MockKinesisClient.java From presto-kinesis with Apache License 2.0 | 5 votes |
public InternalStream(String aName, int nbShards, boolean isActive) { this.streamName = aName; this.streamARN = "local:fake.stream:" + aName; if (isActive) { this.streamStatus = "ACTIVE"; } for (int i = 0; i < nbShards; i++) { InternalShard newShard = new InternalShard(this.streamName, i); newShard.setSequenceNumberRange((new SequenceNumberRange()).withStartingSequenceNumber("100").withEndingSequenceNumber("999")); this.shards.add(newShard); } }
Example #14
Source File: KinesisDataFetcherTest.java From Flink-CEPplus with Apache License 2.0 | 5 votes |
/**
 * Round-trip check: {@link KinesisDataFetcher}'s metadata/handle conversions must be exact
 * inverses of each other when every field is populated.
 */
@Test
public void testStreamShardMetadataAndHandleConversion() {
	final String streamName = "fakeStream1";
	final String shardId = "shard-000001";
	final String parentShardId = "shard-000002";
	final String adjacentParentShardId = "shard-000003";
	final String startingHashKey = "key-000001";
	final String endingHashKey = "key-000010";
	final String startingSequenceNumber = "seq-0000021";
	final String endingSequenceNumber = "seq-00000031";

	// Flink-side metadata with every field populated.
	StreamShardMetadata metadata = new StreamShardMetadata();
	metadata.setStreamName(streamName);
	metadata.setShardId(shardId);
	metadata.setParentShardId(parentShardId);
	metadata.setAdjacentParentShardId(adjacentParentShardId);
	metadata.setStartingHashKey(startingHashKey);
	metadata.setEndingHashKey(endingHashKey);
	metadata.setStartingSequenceNumber(startingSequenceNumber);
	metadata.setEndingSequenceNumber(endingSequenceNumber);

	// The equivalent AWS SDK shard, ranges built separately for readability.
	HashKeyRange hashKeys = new HashKeyRange()
		.withStartingHashKey(startingHashKey)
		.withEndingHashKey(endingHashKey);
	SequenceNumberRange sequenceNumbers = new SequenceNumberRange()
		.withStartingSequenceNumber(startingSequenceNumber)
		.withEndingSequenceNumber(endingSequenceNumber);
	Shard shard = new Shard()
		.withShardId(shardId)
		.withParentShardId(parentShardId)
		.withAdjacentParentShardId(adjacentParentShardId)
		.withHashKeyRange(hashKeys)
		.withSequenceNumberRange(sequenceNumbers);
	StreamShardHandle handle = new StreamShardHandle(streamName, shard);

	// Both directions must reproduce the counterpart exactly.
	assertEquals(metadata, KinesisDataFetcher.convertToStreamShardMetadata(handle));
	assertEquals(handle, KinesisDataFetcher.convertToStreamShardHandle(metadata));
}
Example #15
Source File: FlinkKinesisConsumerMigrationTest.java From Flink-CEPplus with Apache License 2.0 | 4 votes |
/**
 * Restores from an empty migration snapshot and verifies that (a) no state is restored and
 * (b) the initially discovered shard is still registered and consumed from the earliest
 * sequence number (it counts as a shard created while the job was not running).
 */
@Test
public void testRestoreWithEmptyState() throws Exception {
	// Build the shards the fetcher will "discover" on startup, one per TEST_STATE entry.
	final List<StreamShardHandle> initialDiscoveryShards = new ArrayList<>(TEST_STATE.size());
	for (StreamShardMetadata shardMetadata : TEST_STATE.keySet()) {
		Shard shard = new Shard();
		shard.setShardId(shardMetadata.getShardId());
		SequenceNumberRange sequenceNumberRange = new SequenceNumberRange();
		// withStartingSequenceNumber mutates the range in place, so the discarded
		// return value is harmless (see the fluent usage elsewhere in this file).
		sequenceNumberRange.withStartingSequenceNumber("1");
		shard.setSequenceNumberRange(sequenceNumberRange);
		initialDiscoveryShards.add(new StreamShardHandle(shardMetadata.getStreamName(), shard));
	}
	// Fetcher with no pre-seeded state (null) — only the discovery shards above.
	final TestFetcher<String> fetcher = new TestFetcher<>(
		Collections.singletonList(TEST_STREAM_NAME),
		new TestSourceContext<>(),
		new TestRuntimeContext(true, 1, 0),
		TestUtils.getStandardProperties(),
		new KinesisDeserializationSchemaWrapper<>(new SimpleStringSchema()),
		null,
		initialDiscoveryShards);
	final DummyFlinkKinesisConsumer<String> consumerFunction = new DummyFlinkKinesisConsumer<>(
		fetcher, new KinesisDeserializationSchemaWrapper<>(new SimpleStringSchema()));
	StreamSource<String, DummyFlinkKinesisConsumer<String>> consumerOperator =
		new StreamSource<>(consumerFunction);
	final AbstractStreamOperatorTestHarness<String> testHarness =
		new AbstractStreamOperatorTestHarness<>(consumerOperator, 1, 1, 0);
	testHarness.setup();
	// Initialize from the version-specific EMPTY snapshot resource, then start the source.
	testHarness.initializeState(
		OperatorSnapshotUtil.getResourceFilename(
			"kinesis-consumer-migration-test-flink" + testMigrateVersion + "-empty-snapshot"));
	testHarness.open();
	consumerFunction.run(new TestSourceContext<>());
	// assert that no state was restored
	assertTrue(consumerFunction.getRestoredState().isEmpty());
	// although the restore state is empty, the fetcher should still have been registered the initial discovered shard;
	// furthermore, the discovered shard should be considered a newly created shard while the job wasn't running,
	// and therefore should be consumed from the earliest sequence number
	KinesisStreamShardState restoredShardState = fetcher.getSubscribedShardsState().get(0);
	assertEquals(TEST_STREAM_NAME, restoredShardState.getStreamShardHandle().getStreamName());
	assertEquals(TEST_SHARD_ID, restoredShardState.getStreamShardHandle().getShard().getShardId());
	assertFalse(restoredShardState.getStreamShardHandle().isClosed());
	assertEquals(SentinelSequenceNumber.SENTINEL_EARLIEST_SEQUENCE_NUM.get(), restoredShardState.getLastProcessedSequenceNum());
	consumerOperator.close();
	consumerOperator.cancel();
}
Example #16
Source File: FlinkKinesisConsumerTest.java From flink with Apache License 2.0 | 4 votes |
/**
 * FLINK-8484: ensure that a state change in the StreamShardMetadata other than {@link StreamShardMetadata#getShardId()} or
 * {@link StreamShardMetadata#getStreamName()} does not result in the shard not being able to be restored.
 * This handles the corner case where the stored shard metadata is open (no ending sequence number), but after the
 * job restore, the shard has been closed (ending number set) due to re-sharding, and we can no longer rely on
 * {@link StreamShardMetadata#equals(Object)} to find back the sequence number in the collection of restored shard metadata.
 * <p></p>
 * Therefore, we will rely on synchronizing the snapshot's state with the Kinesis shard before attempting to find back
 * the sequence number to restore.
 */
@Test
public void testFindSequenceNumberToRestoreFromIfTheShardHasBeenClosedSinceTheStateWasStored() throws Exception {
	// ----------------------------------------------------------------------
	// setup initial state
	// ----------------------------------------------------------------------
	HashMap<StreamShardHandle, SequenceNumber> fakeRestoredState = getFakeRestoredStore("all");
	// ----------------------------------------------------------------------
	// mock operator state backend and initial state for initializeState()
	// ----------------------------------------------------------------------
	// Convert the fake state to the (metadata, sequence number) tuples the consumer snapshots.
	TestingListState<Tuple2<StreamShardMetadata, SequenceNumber>> listState = new TestingListState<>();
	for (Map.Entry<StreamShardHandle, SequenceNumber> state : fakeRestoredState.entrySet()) {
		listState.add(Tuple2.of(KinesisDataFetcher.convertToStreamShardMetadata(state.getKey()), state.getValue()));
	}
	OperatorStateStore operatorStateStore = mock(OperatorStateStore.class);
	when(operatorStateStore.getUnionListState(Matchers.any(ListStateDescriptor.class))).thenReturn(listState);
	StateInitializationContext initializationContext = mock(StateInitializationContext.class);
	when(initializationContext.getOperatorStateStore()).thenReturn(operatorStateStore);
	when(initializationContext.isRestored()).thenReturn(true);
	// ----------------------------------------------------------------------
	// mock fetcher
	// ----------------------------------------------------------------------
	KinesisDataFetcher mockedFetcher = mockKinesisDataFetcher();
	List<StreamShardHandle> shards = new ArrayList<>();
	// create a fake stream shard handle based on the first entry in the restored state
	final StreamShardHandle originalStreamShardHandle = fakeRestoredState.keySet().iterator().next();
	final StreamShardHandle closedStreamShardHandle = new StreamShardHandle(originalStreamShardHandle.getStreamName(), originalStreamShardHandle.getShard());
	// close the shard handle by setting an ending sequence number
	final SequenceNumberRange sequenceNumberRange = new SequenceNumberRange();
	sequenceNumberRange.setEndingSequenceNumber("1293844");
	closedStreamShardHandle.getShard().setSequenceNumberRange(sequenceNumberRange);
	shards.add(closedStreamShardHandle);
	when(mockedFetcher.discoverNewShardsToSubscribe()).thenReturn(shards);
	// assume the given config is correct
	// NOTE(review): doNothing() on the whole class stubs out its static config
	// validation — presumably so an arbitrary Properties object passes; verify.
	PowerMockito.mockStatic(KinesisConfigUtil.class);
	PowerMockito.doNothing().when(KinesisConfigUtil.class);
	// ----------------------------------------------------------------------
	// start to test fetcher's initial state seeding
	// ----------------------------------------------------------------------
	TestableFlinkKinesisConsumer consumer = new TestableFlinkKinesisConsumer(
		"fakeStream", new Properties(), 10, 2);
	consumer.initializeState(initializationContext);
	consumer.open(new Configuration());
	consumer.run(Mockito.mock(SourceFunction.SourceContext.class));
	// The sequence number must be found via the ORIGINAL (open) handle's entry even though
	// the shard registered with the fetcher is the CLOSED handle.
	Mockito.verify(mockedFetcher).registerNewSubscribedShardState(
		new KinesisStreamShardState(KinesisDataFetcher.convertToStreamShardMetadata(closedStreamShardHandle), closedStreamShardHandle, fakeRestoredState.get(closedStreamShardHandle)));
}
Example #17
Source File: FlinkKinesisConsumerMigrationTest.java From flink with Apache License 2.0 | 4 votes |
/**
 * Runs the consumer source in a background thread with the given pre-seeded state,
 * takes a checkpoint snapshot under the harness checkpoint lock, and writes the
 * resulting state handle to {@code path} (used to generate migration test resources).
 */
@SuppressWarnings("unchecked")
private void writeSnapshot(String path, HashMap<StreamShardMetadata, SequenceNumber> state) throws Exception {
	// One discoverable shard per state entry, each open with starting sequence number "1".
	final List<StreamShardHandle> initialDiscoveryShards = new ArrayList<>(state.size());
	for (StreamShardMetadata shardMetadata : state.keySet()) {
		Shard shard = new Shard();
		shard.setShardId(shardMetadata.getShardId());
		SequenceNumberRange sequenceNumberRange = new SequenceNumberRange();
		// withStartingSequenceNumber mutates in place; discarding the return is harmless.
		sequenceNumberRange.withStartingSequenceNumber("1");
		shard.setSequenceNumberRange(sequenceNumberRange);
		initialDiscoveryShards.add(new StreamShardHandle(shardMetadata.getStreamName(), shard));
	}
	final TestFetcher<String> fetcher = new TestFetcher<>(
		Collections.singletonList(TEST_STREAM_NAME),
		new TestSourceContext<>(),
		new TestRuntimeContext(true, 1, 0),
		TestUtils.getStandardProperties(),
		new KinesisDeserializationSchemaWrapper<>(new SimpleStringSchema()),
		state,
		initialDiscoveryShards);
	final DummyFlinkKinesisConsumer<String> consumer = new DummyFlinkKinesisConsumer<>(
		fetcher, new KinesisDeserializationSchemaWrapper<>(new SimpleStringSchema()));
	StreamSource<String, DummyFlinkKinesisConsumer<String>> consumerOperator = new StreamSource<>(consumer);
	final AbstractStreamOperatorTestHarness<String> testHarness =
		new AbstractStreamOperatorTestHarness<>(consumerOperator, 1, 1, 0);
	testHarness.setTimeCharacteristic(TimeCharacteristic.ProcessingTime);
	testHarness.setup();
	testHarness.open();
	// Capture any failure from the source thread so it is not silently lost.
	final AtomicReference<Throwable> error = new AtomicReference<>();
	// run the source asynchronously
	Thread runner = new Thread() {
		@Override
		public void run() {
			try {
				consumer.run(new TestSourceContext<>());
			} catch (Throwable t) {
				t.printStackTrace();
				error.set(t);
			}
		}
	};
	runner.start();
	// Wait until the fetcher is actually running before snapshotting.
	fetcher.waitUntilRun();
	final OperatorSubtaskState snapshot;
	// The snapshot must be taken under the harness checkpoint lock.
	synchronized (testHarness.getCheckpointLock()) {
		snapshot = testHarness.snapshot(0L, 0L);
	}
	OperatorSnapshotUtil.writeStateHandle(snapshot, path);
	consumerOperator.close();
	runner.join();
}
Example #18
Source File: FlinkKinesisConsumerMigrationTest.java From flink with Apache License 2.0 | 4 votes |
/**
 * Restores from a migration snapshot after the stream has been re-sharded: the original shard
 * is now closed and two child shards exist. Verifies the old state is restored for the closed
 * shard and that both new split shards are registered to be consumed from the earliest
 * sequence number.
 */
@Test
public void testRestoreWithReshardedStream() throws Exception {
	final List<StreamShardHandle> initialDiscoveryShards = new ArrayList<>(TEST_STATE.size());
	for (StreamShardMetadata shardMetadata : TEST_STATE.keySet()) {
		// setup the closed shard
		Shard closedShard = new Shard();
		closedShard.setShardId(shardMetadata.getShardId());
		SequenceNumberRange closedSequenceNumberRange = new SequenceNumberRange();
		// with* mutates in place; the discarded return value is harmless.
		closedSequenceNumberRange.withStartingSequenceNumber("1");
		closedSequenceNumberRange.withEndingSequenceNumber("1087654321"); // this represents a closed shard
		closedShard.setSequenceNumberRange(closedSequenceNumberRange);
		initialDiscoveryShards.add(new StreamShardHandle(shardMetadata.getStreamName(), closedShard));
		// setup the new shards
		// Both children point at TEST_SHARD_ID as parent and have no ending sequence
		// number, i.e. they are open shards created by the split.
		Shard newSplitShard1 = new Shard();
		newSplitShard1.setShardId(KinesisShardIdGenerator.generateFromShardOrder(1));
		SequenceNumberRange newSequenceNumberRange1 = new SequenceNumberRange();
		newSequenceNumberRange1.withStartingSequenceNumber("1087654322");
		newSplitShard1.setSequenceNumberRange(newSequenceNumberRange1);
		newSplitShard1.setParentShardId(TEST_SHARD_ID);
		Shard newSplitShard2 = new Shard();
		newSplitShard2.setShardId(KinesisShardIdGenerator.generateFromShardOrder(2));
		SequenceNumberRange newSequenceNumberRange2 = new SequenceNumberRange();
		newSequenceNumberRange2.withStartingSequenceNumber("2087654322");
		newSplitShard2.setSequenceNumberRange(newSequenceNumberRange2);
		newSplitShard2.setParentShardId(TEST_SHARD_ID);
		initialDiscoveryShards.add(new StreamShardHandle(shardMetadata.getStreamName(), newSplitShard1));
		initialDiscoveryShards.add(new StreamShardHandle(shardMetadata.getStreamName(), newSplitShard2));
	}
	// Fetcher with no pre-seeded state (null) — only the discovery shards above.
	final TestFetcher<String> fetcher = new TestFetcher<>(
		Collections.singletonList(TEST_STREAM_NAME),
		new TestSourceContext<>(),
		new TestRuntimeContext(true, 1, 0),
		TestUtils.getStandardProperties(),
		new KinesisDeserializationSchemaWrapper<>(new SimpleStringSchema()),
		null,
		initialDiscoveryShards);
	final DummyFlinkKinesisConsumer<String> consumerFunction = new DummyFlinkKinesisConsumer<>(
		fetcher, new KinesisDeserializationSchemaWrapper<>(new SimpleStringSchema()));
	StreamSource<String, DummyFlinkKinesisConsumer<String>> consumerOperator =
		new StreamSource<>(consumerFunction);
	final AbstractStreamOperatorTestHarness<String> testHarness =
		new AbstractStreamOperatorTestHarness<>(consumerOperator, 1, 1, 0);
	testHarness.setup();
	// Initialize from the version-specific non-empty snapshot resource, then start the source.
	testHarness.initializeState(
		OperatorSnapshotUtil.getResourceFilename(
			"kinesis-consumer-migration-test-flink" + testMigrateVersion + "-snapshot"));
	testHarness.open();
	consumerFunction.run(new TestSourceContext<>());
	// assert that state is correctly restored
	assertNotEquals(null, consumerFunction.getRestoredState());
	assertEquals(1, consumerFunction.getRestoredState().size());
	assertEquals(TEST_STATE, removeEquivalenceWrappers(consumerFunction.getRestoredState()));
	// assert that the fetcher is registered with all shards, including new shards
	assertEquals(3, fetcher.getSubscribedShardsState().size());
	// Shard 0: the closed original shard, resumed from the snapshotted sequence number.
	KinesisStreamShardState restoredClosedShardState = fetcher.getSubscribedShardsState().get(0);
	assertEquals(TEST_STREAM_NAME, restoredClosedShardState.getStreamShardHandle().getStreamName());
	assertEquals(TEST_SHARD_ID, restoredClosedShardState.getStreamShardHandle().getShard().getShardId());
	assertTrue(restoredClosedShardState.getStreamShardHandle().isClosed());
	assertEquals(TEST_SEQUENCE_NUMBER, restoredClosedShardState.getLastProcessedSequenceNum());
	// Shard 1: first split child, open.
	KinesisStreamShardState restoredNewSplitShard1 = fetcher.getSubscribedShardsState().get(1);
	assertEquals(TEST_STREAM_NAME, restoredNewSplitShard1.getStreamShardHandle().getStreamName());
	assertEquals(KinesisShardIdGenerator.generateFromShardOrder(1), restoredNewSplitShard1.getStreamShardHandle().getShard().getShardId());
	assertFalse(restoredNewSplitShard1.getStreamShardHandle().isClosed());
	// new shards should be consumed from the beginning
	assertEquals(SentinelSequenceNumber.SENTINEL_EARLIEST_SEQUENCE_NUM.get(), restoredNewSplitShard1.getLastProcessedSequenceNum());
	// Shard 2: second split child, open.
	KinesisStreamShardState restoredNewSplitShard2 = fetcher.getSubscribedShardsState().get(2);
	assertEquals(TEST_STREAM_NAME, restoredNewSplitShard2.getStreamShardHandle().getStreamName());
	assertEquals(KinesisShardIdGenerator.generateFromShardOrder(2), restoredNewSplitShard2.getStreamShardHandle().getShard().getShardId());
	assertFalse(restoredNewSplitShard2.getStreamShardHandle().isClosed());
	// new shards should be consumed from the beginning
	assertEquals(SentinelSequenceNumber.SENTINEL_EARLIEST_SEQUENCE_NUM.get(), restoredNewSplitShard2.getLastProcessedSequenceNum());
	consumerOperator.close();
	consumerOperator.cancel();
}
Example #19
Source File: FlinkKinesisConsumerMigrationTest.java From flink with Apache License 2.0 | 4 votes |
@Test
public void testRestore() throws Exception {
	// Build one "live" discoverable shard per snapshotted metadata entry, so the restore
	// path can match the snapshot state against shards returned by shard discovery.
	final List<StreamShardHandle> initialDiscoveryShards = new ArrayList<>(TEST_STATE.size());
	for (StreamShardMetadata metadata : TEST_STATE.keySet()) {
		Shard discoveredShard = new Shard();
		discoveredShard.setShardId(metadata.getShardId());
		SequenceNumberRange seqRange = new SequenceNumberRange();
		seqRange.withStartingSequenceNumber("1");
		discoveredShard.setSequenceNumberRange(seqRange);
		initialDiscoveryShards.add(new StreamShardHandle(metadata.getStreamName(), discoveredShard));
	}

	final TestFetcher<String> fetcher = new TestFetcher<>(
		Collections.singletonList(TEST_STREAM_NAME),
		new TestSourceContext<>(),
		new TestRuntimeContext(true, 1, 0),
		TestUtils.getStandardProperties(),
		new KinesisDeserializationSchemaWrapper<>(new SimpleStringSchema()),
		null,
		initialDiscoveryShards);

	final DummyFlinkKinesisConsumer<String> consumerFunction = new DummyFlinkKinesisConsumer<>(
		fetcher, new KinesisDeserializationSchemaWrapper<>(new SimpleStringSchema()));

	StreamSource<String, DummyFlinkKinesisConsumer<String>> consumerOperator =
		new StreamSource<>(consumerFunction);

	final AbstractStreamOperatorTestHarness<String> testHarness =
		new AbstractStreamOperatorTestHarness<>(consumerOperator, 1, 1, 0);

	testHarness.setup();
	// Restore from the pre-written per-version snapshot resource.
	testHarness.initializeState(
		OperatorSnapshotUtil.getResourceFilename(
			"kinesis-consumer-migration-test-flink" + testMigrateVersion + "-snapshot"));
	testHarness.open();

	consumerFunction.run(new TestSourceContext<>());

	// The restored union state must contain exactly the single snapshotted shard entry.
	assertNotEquals(null, consumerFunction.getRestoredState());
	assertEquals(1, consumerFunction.getRestoredState().size());
	assertEquals(TEST_STATE, removeEquivalenceWrappers(consumerFunction.getRestoredState()));

	assertEquals(1, fetcher.getSubscribedShardsState().size());
	assertEquals(TEST_SEQUENCE_NUMBER, fetcher.getSubscribedShardsState().get(0).getLastProcessedSequenceNum());

	KinesisStreamShardState subscribedState = fetcher.getSubscribedShardsState().get(0);
	assertEquals(TEST_STREAM_NAME, subscribedState.getStreamShardHandle().getStreamName());
	assertEquals(TEST_SHARD_ID, subscribedState.getStreamShardHandle().getShard().getShardId());
	assertFalse(subscribedState.getStreamShardHandle().isClosed());
	assertEquals(TEST_SEQUENCE_NUMBER, subscribedState.getLastProcessedSequenceNum());

	consumerOperator.close();
	consumerOperator.cancel();
}
Example #20
Source File: FlinkKinesisConsumerMigrationTest.java From flink with Apache License 2.0 | 4 votes |
@Test
public void testRestoreWithEmptyState() throws Exception {
	// Make one shard discoverable per metadata entry; the snapshot itself is empty,
	// so these shards should be treated as freshly created ones.
	final List<StreamShardHandle> initialDiscoveryShards = new ArrayList<>(TEST_STATE.size());
	for (StreamShardMetadata metadata : TEST_STATE.keySet()) {
		Shard discoveredShard = new Shard();
		discoveredShard.setShardId(metadata.getShardId());
		SequenceNumberRange seqRange = new SequenceNumberRange();
		seqRange.withStartingSequenceNumber("1");
		discoveredShard.setSequenceNumberRange(seqRange);
		initialDiscoveryShards.add(new StreamShardHandle(metadata.getStreamName(), discoveredShard));
	}

	final TestFetcher<String> fetcher = new TestFetcher<>(
		Collections.singletonList(TEST_STREAM_NAME),
		new TestSourceContext<>(),
		new TestRuntimeContext(true, 1, 0),
		TestUtils.getStandardProperties(),
		new KinesisDeserializationSchemaWrapper<>(new SimpleStringSchema()),
		null,
		initialDiscoveryShards);

	final DummyFlinkKinesisConsumer<String> consumerFunction = new DummyFlinkKinesisConsumer<>(
		fetcher, new KinesisDeserializationSchemaWrapper<>(new SimpleStringSchema()));

	StreamSource<String, DummyFlinkKinesisConsumer<String>> consumerOperator =
		new StreamSource<>(consumerFunction);

	final AbstractStreamOperatorTestHarness<String> testHarness =
		new AbstractStreamOperatorTestHarness<>(consumerOperator, 1, 1, 0);

	testHarness.setup();
	testHarness.initializeState(
		OperatorSnapshotUtil.getResourceFilename(
			"kinesis-consumer-migration-test-flink" + testMigrateVersion + "-empty-snapshot"));
	testHarness.open();

	consumerFunction.run(new TestSourceContext<>());

	// No state should have been restored from the empty snapshot.
	assertTrue(consumerFunction.getRestoredState().isEmpty());

	// Even though the restored state is empty, the fetcher should still have registered the
	// initially discovered shard; it is considered newly created while the job was not running,
	// and must therefore be consumed from the earliest sequence number.
	KinesisStreamShardState subscribedState = fetcher.getSubscribedShardsState().get(0);
	assertEquals(TEST_STREAM_NAME, subscribedState.getStreamShardHandle().getStreamName());
	assertEquals(TEST_SHARD_ID, subscribedState.getStreamShardHandle().getShard().getShardId());
	assertFalse(subscribedState.getStreamShardHandle().isClosed());
	assertEquals(SentinelSequenceNumber.SENTINEL_EARLIEST_SEQUENCE_NUM.get(), subscribedState.getLastProcessedSequenceNum());

	consumerOperator.close();
	consumerOperator.cancel();
}
Example #21
Source File: FlinkKinesisConsumerTest.java From flink with Apache License 2.0 | 4 votes |
/** * FLINK-8484: ensure that a state change in the StreamShardMetadata other than {@link StreamShardMetadata#getShardId()} or * {@link StreamShardMetadata#getStreamName()} does not result in the shard not being able to be restored. * This handles the corner case where the stored shard metadata is open (no ending sequence number), but after the * job restore, the shard has been closed (ending number set) due to re-sharding, and we can no longer rely on * {@link StreamShardMetadata#equals(Object)} to find back the sequence number in the collection of restored shard metadata. * <p></p> * Therefore, we will rely on synchronizing the snapshot's state with the Kinesis shard before attempting to find back * the sequence number to restore. */ @Test public void testFindSequenceNumberToRestoreFromIfTheShardHasBeenClosedSinceTheStateWasStored() throws Exception { // ---------------------------------------------------------------------- // setup initial state // ---------------------------------------------------------------------- HashMap<StreamShardHandle, SequenceNumber> fakeRestoredState = getFakeRestoredStore("all"); // ---------------------------------------------------------------------- // mock operator state backend and initial state for initializeState() // ---------------------------------------------------------------------- TestingListState<Tuple2<StreamShardMetadata, SequenceNumber>> listState = new TestingListState<>(); for (Map.Entry<StreamShardHandle, SequenceNumber> state : fakeRestoredState.entrySet()) { listState.add(Tuple2.of(KinesisDataFetcher.convertToStreamShardMetadata(state.getKey()), state.getValue())); } OperatorStateStore operatorStateStore = mock(OperatorStateStore.class); when(operatorStateStore.getUnionListState(Matchers.any(ListStateDescriptor.class))).thenReturn(listState); StateInitializationContext initializationContext = mock(StateInitializationContext.class); 
when(initializationContext.getOperatorStateStore()).thenReturn(operatorStateStore); when(initializationContext.isRestored()).thenReturn(true); // ---------------------------------------------------------------------- // mock fetcher // ---------------------------------------------------------------------- KinesisDataFetcher mockedFetcher = mockKinesisDataFetcher(); List<StreamShardHandle> shards = new ArrayList<>(); // create a fake stream shard handle based on the first entry in the restored state final StreamShardHandle originalStreamShardHandle = fakeRestoredState.keySet().iterator().next(); final StreamShardHandle closedStreamShardHandle = new StreamShardHandle(originalStreamShardHandle.getStreamName(), originalStreamShardHandle.getShard()); // close the shard handle by setting an ending sequence number final SequenceNumberRange sequenceNumberRange = new SequenceNumberRange(); sequenceNumberRange.setEndingSequenceNumber("1293844"); closedStreamShardHandle.getShard().setSequenceNumberRange(sequenceNumberRange); shards.add(closedStreamShardHandle); when(mockedFetcher.discoverNewShardsToSubscribe()).thenReturn(shards); // assume the given config is correct PowerMockito.mockStatic(KinesisConfigUtil.class); PowerMockito.doNothing().when(KinesisConfigUtil.class); // ---------------------------------------------------------------------- // start to test fetcher's initial state seeding // ---------------------------------------------------------------------- TestableFlinkKinesisConsumer consumer = new TestableFlinkKinesisConsumer( "fakeStream", new Properties(), 10, 2); consumer.initializeState(initializationContext); consumer.open(new Configuration()); consumer.run(Mockito.mock(SourceFunction.SourceContext.class)); Mockito.verify(mockedFetcher).registerNewSubscribedShardState( new KinesisStreamShardState(KinesisDataFetcher.convertToStreamShardMetadata(closedStreamShardHandle), closedStreamShardHandle, fakeRestoredState.get(closedStreamShardHandle))); }
Example #22
Source File: FlinkKinesisConsumerMigrationTest.java From flink with Apache License 2.0 | 4 votes |
@SuppressWarnings("unchecked") private void writeSnapshot(String path, HashMap<StreamShardMetadata, SequenceNumber> state) throws Exception { final List<StreamShardHandle> initialDiscoveryShards = new ArrayList<>(state.size()); for (StreamShardMetadata shardMetadata : state.keySet()) { Shard shard = new Shard(); shard.setShardId(shardMetadata.getShardId()); SequenceNumberRange sequenceNumberRange = new SequenceNumberRange(); sequenceNumberRange.withStartingSequenceNumber("1"); shard.setSequenceNumberRange(sequenceNumberRange); initialDiscoveryShards.add(new StreamShardHandle(shardMetadata.getStreamName(), shard)); } final TestFetcher<String> fetcher = new TestFetcher<>( Collections.singletonList(TEST_STREAM_NAME), new TestSourceContext<>(), new TestRuntimeContext(true, 1, 0), TestUtils.getStandardProperties(), new KinesisDeserializationSchemaWrapper<>(new SimpleStringSchema()), state, initialDiscoveryShards); final DummyFlinkKinesisConsumer<String> consumer = new DummyFlinkKinesisConsumer<>( fetcher, new KinesisDeserializationSchemaWrapper<>(new SimpleStringSchema())); StreamSource<String, DummyFlinkKinesisConsumer<String>> consumerOperator = new StreamSource<>(consumer); final AbstractStreamOperatorTestHarness<String> testHarness = new AbstractStreamOperatorTestHarness<>(consumerOperator, 1, 1, 0); testHarness.setTimeCharacteristic(TimeCharacteristic.ProcessingTime); testHarness.setup(); testHarness.open(); final AtomicReference<Throwable> error = new AtomicReference<>(); // run the source asynchronously Thread runner = new Thread() { @Override public void run() { try { consumer.run(new TestSourceContext<>()); } catch (Throwable t) { t.printStackTrace(); error.set(t); } } }; runner.start(); fetcher.waitUntilRun(); final OperatorSubtaskState snapshot; synchronized (testHarness.getCheckpointLock()) { snapshot = testHarness.snapshot(0L, 0L); } OperatorSnapshotUtil.writeStateHandle(snapshot, path); consumerOperator.close(); runner.join(); }
Example #23
Source File: FlinkKinesisConsumerMigrationTest.java From flink with Apache License 2.0 | 4 votes |
@Test public void testRestoreWithReshardedStream() throws Exception { final List<StreamShardHandle> initialDiscoveryShards = new ArrayList<>(TEST_STATE.size()); for (StreamShardMetadata shardMetadata : TEST_STATE.keySet()) { // setup the closed shard Shard closedShard = new Shard(); closedShard.setShardId(shardMetadata.getShardId()); SequenceNumberRange closedSequenceNumberRange = new SequenceNumberRange(); closedSequenceNumberRange.withStartingSequenceNumber("1"); closedSequenceNumberRange.withEndingSequenceNumber("1087654321"); // this represents a closed shard closedShard.setSequenceNumberRange(closedSequenceNumberRange); initialDiscoveryShards.add(new StreamShardHandle(shardMetadata.getStreamName(), closedShard)); // setup the new shards Shard newSplitShard1 = new Shard(); newSplitShard1.setShardId(KinesisShardIdGenerator.generateFromShardOrder(1)); SequenceNumberRange newSequenceNumberRange1 = new SequenceNumberRange(); newSequenceNumberRange1.withStartingSequenceNumber("1087654322"); newSplitShard1.setSequenceNumberRange(newSequenceNumberRange1); newSplitShard1.setParentShardId(TEST_SHARD_ID); Shard newSplitShard2 = new Shard(); newSplitShard2.setShardId(KinesisShardIdGenerator.generateFromShardOrder(2)); SequenceNumberRange newSequenceNumberRange2 = new SequenceNumberRange(); newSequenceNumberRange2.withStartingSequenceNumber("2087654322"); newSplitShard2.setSequenceNumberRange(newSequenceNumberRange2); newSplitShard2.setParentShardId(TEST_SHARD_ID); initialDiscoveryShards.add(new StreamShardHandle(shardMetadata.getStreamName(), newSplitShard1)); initialDiscoveryShards.add(new StreamShardHandle(shardMetadata.getStreamName(), newSplitShard2)); } final TestFetcher<String> fetcher = new TestFetcher<>( Collections.singletonList(TEST_STREAM_NAME), new TestSourceContext<>(), new TestRuntimeContext(true, 1, 0), TestUtils.getStandardProperties(), new KinesisDeserializationSchemaWrapper<>(new SimpleStringSchema()), null, initialDiscoveryShards); final 
DummyFlinkKinesisConsumer<String> consumerFunction = new DummyFlinkKinesisConsumer<>( fetcher, new KinesisDeserializationSchemaWrapper<>(new SimpleStringSchema())); StreamSource<String, DummyFlinkKinesisConsumer<String>> consumerOperator = new StreamSource<>(consumerFunction); final AbstractStreamOperatorTestHarness<String> testHarness = new AbstractStreamOperatorTestHarness<>(consumerOperator, 1, 1, 0); testHarness.setup(); testHarness.initializeState( OperatorSnapshotUtil.getResourceFilename( "kinesis-consumer-migration-test-flink" + testMigrateVersion + "-snapshot")); testHarness.open(); consumerFunction.run(new TestSourceContext<>()); // assert that state is correctly restored assertNotEquals(null, consumerFunction.getRestoredState()); assertEquals(1, consumerFunction.getRestoredState().size()); assertEquals(TEST_STATE, removeEquivalenceWrappers(consumerFunction.getRestoredState())); // assert that the fetcher is registered with all shards, including new shards assertEquals(3, fetcher.getSubscribedShardsState().size()); KinesisStreamShardState restoredClosedShardState = fetcher.getSubscribedShardsState().get(0); assertEquals(TEST_STREAM_NAME, restoredClosedShardState.getStreamShardHandle().getStreamName()); assertEquals(TEST_SHARD_ID, restoredClosedShardState.getStreamShardHandle().getShard().getShardId()); assertTrue(restoredClosedShardState.getStreamShardHandle().isClosed()); assertEquals(TEST_SEQUENCE_NUMBER, restoredClosedShardState.getLastProcessedSequenceNum()); KinesisStreamShardState restoredNewSplitShard1 = fetcher.getSubscribedShardsState().get(1); assertEquals(TEST_STREAM_NAME, restoredNewSplitShard1.getStreamShardHandle().getStreamName()); assertEquals(KinesisShardIdGenerator.generateFromShardOrder(1), restoredNewSplitShard1.getStreamShardHandle().getShard().getShardId()); assertFalse(restoredNewSplitShard1.getStreamShardHandle().isClosed()); // new shards should be consumed from the beginning 
assertEquals(SentinelSequenceNumber.SENTINEL_EARLIEST_SEQUENCE_NUM.get(), restoredNewSplitShard1.getLastProcessedSequenceNum()); KinesisStreamShardState restoredNewSplitShard2 = fetcher.getSubscribedShardsState().get(2); assertEquals(TEST_STREAM_NAME, restoredNewSplitShard2.getStreamShardHandle().getStreamName()); assertEquals(KinesisShardIdGenerator.generateFromShardOrder(2), restoredNewSplitShard2.getStreamShardHandle().getShard().getShardId()); assertFalse(restoredNewSplitShard2.getStreamShardHandle().isClosed()); // new shards should be consumed from the beginning assertEquals(SentinelSequenceNumber.SENTINEL_EARLIEST_SEQUENCE_NUM.get(), restoredNewSplitShard2.getLastProcessedSequenceNum()); consumerOperator.close(); consumerOperator.cancel(); }
Example #24
Source File: FlinkKinesisConsumerMigrationTest.java From flink with Apache License 2.0 | 4 votes |
@Test
public void testRestore() throws Exception {
	// One "live" shard per snapshotted metadata entry for the fetcher's initial discovery.
	final List<StreamShardHandle> initialDiscoveryShards = new ArrayList<>(TEST_STATE.size());
	for (StreamShardMetadata shardMeta : TEST_STATE.keySet()) {
		Shard liveShard = new Shard();
		liveShard.setShardId(shardMeta.getShardId());
		SequenceNumberRange startRange = new SequenceNumberRange();
		startRange.withStartingSequenceNumber("1");
		liveShard.setSequenceNumberRange(startRange);
		initialDiscoveryShards.add(new StreamShardHandle(shardMeta.getStreamName(), liveShard));
	}

	final TestFetcher<String> fetcher = new TestFetcher<>(
		Collections.singletonList(TEST_STREAM_NAME),
		new TestSourceContext<>(),
		new TestRuntimeContext(true, 1, 0),
		TestUtils.getStandardProperties(),
		new KinesisDeserializationSchemaWrapper<>(new SimpleStringSchema()),
		null,
		initialDiscoveryShards);

	final DummyFlinkKinesisConsumer<String> consumerFunction = new DummyFlinkKinesisConsumer<>(
		fetcher, new KinesisDeserializationSchemaWrapper<>(new SimpleStringSchema()));

	StreamSource<String, DummyFlinkKinesisConsumer<String>> consumerOperator =
		new StreamSource<>(consumerFunction);

	final AbstractStreamOperatorTestHarness<String> testHarness =
		new AbstractStreamOperatorTestHarness<>(consumerOperator, 1, 1, 0);

	testHarness.setup();
	testHarness.initializeState(
		OperatorSnapshotUtil.getResourceFilename(
			"kinesis-consumer-migration-test-flink" + testMigrateVersion + "-snapshot"));
	testHarness.open();

	consumerFunction.run(new TestSourceContext<>());

	// Exactly one shard entry must have been restored, matching the snapshotted state.
	assertNotEquals(null, consumerFunction.getRestoredState());
	assertEquals(1, consumerFunction.getRestoredState().size());
	assertEquals(TEST_STATE, removeEquivalenceWrappers(consumerFunction.getRestoredState()));

	assertEquals(1, fetcher.getSubscribedShardsState().size());
	assertEquals(TEST_SEQUENCE_NUMBER, fetcher.getSubscribedShardsState().get(0).getLastProcessedSequenceNum());

	KinesisStreamShardState restored = fetcher.getSubscribedShardsState().get(0);
	assertEquals(TEST_STREAM_NAME, restored.getStreamShardHandle().getStreamName());
	assertEquals(TEST_SHARD_ID, restored.getStreamShardHandle().getShard().getShardId());
	assertFalse(restored.getStreamShardHandle().isClosed());
	assertEquals(TEST_SEQUENCE_NUMBER, restored.getLastProcessedSequenceNum());

	consumerOperator.close();
	consumerOperator.cancel();
}
Example #25
Source File: FlinkKinesisConsumerMigrationTest.java From flink with Apache License 2.0 | 4 votes |
@Test
public void testRestoreWithEmptyState() throws Exception {
	// Discoverable shards exist, but the snapshot being restored contains no state.
	final List<StreamShardHandle> initialDiscoveryShards = new ArrayList<>(TEST_STATE.size());
	for (StreamShardMetadata shardMeta : TEST_STATE.keySet()) {
		Shard liveShard = new Shard();
		liveShard.setShardId(shardMeta.getShardId());
		SequenceNumberRange startRange = new SequenceNumberRange();
		startRange.withStartingSequenceNumber("1");
		liveShard.setSequenceNumberRange(startRange);
		initialDiscoveryShards.add(new StreamShardHandle(shardMeta.getStreamName(), liveShard));
	}

	final TestFetcher<String> fetcher = new TestFetcher<>(
		Collections.singletonList(TEST_STREAM_NAME),
		new TestSourceContext<>(),
		new TestRuntimeContext(true, 1, 0),
		TestUtils.getStandardProperties(),
		new KinesisDeserializationSchemaWrapper<>(new SimpleStringSchema()),
		null,
		initialDiscoveryShards);

	final DummyFlinkKinesisConsumer<String> consumerFunction = new DummyFlinkKinesisConsumer<>(
		fetcher, new KinesisDeserializationSchemaWrapper<>(new SimpleStringSchema()));

	StreamSource<String, DummyFlinkKinesisConsumer<String>> consumerOperator =
		new StreamSource<>(consumerFunction);

	final AbstractStreamOperatorTestHarness<String> testHarness =
		new AbstractStreamOperatorTestHarness<>(consumerOperator, 1, 1, 0);

	testHarness.setup();
	testHarness.initializeState(
		OperatorSnapshotUtil.getResourceFilename(
			"kinesis-consumer-migration-test-flink" + testMigrateVersion + "-empty-snapshot"));
	testHarness.open();

	consumerFunction.run(new TestSourceContext<>());

	// assert that no state was restored
	assertTrue(consumerFunction.getRestoredState().isEmpty());

	// Although the restored state is empty, the fetcher should still have registered the
	// initially discovered shard. It counts as newly created while the job was not running,
	// so consumption starts from the earliest sequence number.
	KinesisStreamShardState restored = fetcher.getSubscribedShardsState().get(0);
	assertEquals(TEST_STREAM_NAME, restored.getStreamShardHandle().getStreamName());
	assertEquals(TEST_SHARD_ID, restored.getStreamShardHandle().getShard().getShardId());
	assertFalse(restored.getStreamShardHandle().isClosed());
	assertEquals(SentinelSequenceNumber.SENTINEL_EARLIEST_SEQUENCE_NUM.get(), restored.getLastProcessedSequenceNum());

	consumerOperator.close();
	consumerOperator.cancel();
}
Example #26
Source File: FlinkKinesisConsumerMigrationTest.java From Flink-CEPplus with Apache License 2.0 | 4 votes |
@Test
public void testRestore() throws Exception {
	// Prepare the shards the fetcher will "discover" on startup — one per snapshotted entry.
	final List<StreamShardHandle> initialDiscoveryShards = new ArrayList<>(TEST_STATE.size());
	for (StreamShardMetadata meta : TEST_STATE.keySet()) {
		Shard kinesisShard = new Shard();
		kinesisShard.setShardId(meta.getShardId());
		SequenceNumberRange openRange = new SequenceNumberRange();
		openRange.withStartingSequenceNumber("1");
		kinesisShard.setSequenceNumberRange(openRange);
		initialDiscoveryShards.add(new StreamShardHandle(meta.getStreamName(), kinesisShard));
	}

	final TestFetcher<String> fetcher = new TestFetcher<>(
		Collections.singletonList(TEST_STREAM_NAME),
		new TestSourceContext<>(),
		new TestRuntimeContext(true, 1, 0),
		TestUtils.getStandardProperties(),
		new KinesisDeserializationSchemaWrapper<>(new SimpleStringSchema()),
		null,
		initialDiscoveryShards);

	final DummyFlinkKinesisConsumer<String> consumerFunction = new DummyFlinkKinesisConsumer<>(
		fetcher, new KinesisDeserializationSchemaWrapper<>(new SimpleStringSchema()));

	StreamSource<String, DummyFlinkKinesisConsumer<String>> consumerOperator =
		new StreamSource<>(consumerFunction);

	final AbstractStreamOperatorTestHarness<String> testHarness =
		new AbstractStreamOperatorTestHarness<>(consumerOperator, 1, 1, 0);

	testHarness.setup();
	testHarness.initializeState(
		OperatorSnapshotUtil.getResourceFilename(
			"kinesis-consumer-migration-test-flink" + testMigrateVersion + "-snapshot"));
	testHarness.open();

	consumerFunction.run(new TestSourceContext<>());

	// Restored state must equal the snapshotted state, and the single shard must be
	// subscribed at the snapshotted sequence number.
	assertNotEquals(null, consumerFunction.getRestoredState());
	assertEquals(1, consumerFunction.getRestoredState().size());
	assertEquals(TEST_STATE, removeEquivalenceWrappers(consumerFunction.getRestoredState()));

	assertEquals(1, fetcher.getSubscribedShardsState().size());
	assertEquals(TEST_SEQUENCE_NUMBER, fetcher.getSubscribedShardsState().get(0).getLastProcessedSequenceNum());

	KinesisStreamShardState shardState = fetcher.getSubscribedShardsState().get(0);
	assertEquals(TEST_STREAM_NAME, shardState.getStreamShardHandle().getStreamName());
	assertEquals(TEST_SHARD_ID, shardState.getStreamShardHandle().getShard().getShardId());
	assertFalse(shardState.getStreamShardHandle().isClosed());
	assertEquals(TEST_SEQUENCE_NUMBER, shardState.getLastProcessedSequenceNum());

	consumerOperator.close();
	consumerOperator.cancel();
}
Example #27
Source File: FlinkKinesisConsumerMigrationTest.java From Flink-CEPplus with Apache License 2.0 | 4 votes |
@Test public void testRestoreWithReshardedStream() throws Exception { final List<StreamShardHandle> initialDiscoveryShards = new ArrayList<>(TEST_STATE.size()); for (StreamShardMetadata shardMetadata : TEST_STATE.keySet()) { // setup the closed shard Shard closedShard = new Shard(); closedShard.setShardId(shardMetadata.getShardId()); SequenceNumberRange closedSequenceNumberRange = new SequenceNumberRange(); closedSequenceNumberRange.withStartingSequenceNumber("1"); closedSequenceNumberRange.withEndingSequenceNumber("1087654321"); // this represents a closed shard closedShard.setSequenceNumberRange(closedSequenceNumberRange); initialDiscoveryShards.add(new StreamShardHandle(shardMetadata.getStreamName(), closedShard)); // setup the new shards Shard newSplitShard1 = new Shard(); newSplitShard1.setShardId(KinesisShardIdGenerator.generateFromShardOrder(1)); SequenceNumberRange newSequenceNumberRange1 = new SequenceNumberRange(); newSequenceNumberRange1.withStartingSequenceNumber("1087654322"); newSplitShard1.setSequenceNumberRange(newSequenceNumberRange1); newSplitShard1.setParentShardId(TEST_SHARD_ID); Shard newSplitShard2 = new Shard(); newSplitShard2.setShardId(KinesisShardIdGenerator.generateFromShardOrder(2)); SequenceNumberRange newSequenceNumberRange2 = new SequenceNumberRange(); newSequenceNumberRange2.withStartingSequenceNumber("2087654322"); newSplitShard2.setSequenceNumberRange(newSequenceNumberRange2); newSplitShard2.setParentShardId(TEST_SHARD_ID); initialDiscoveryShards.add(new StreamShardHandle(shardMetadata.getStreamName(), newSplitShard1)); initialDiscoveryShards.add(new StreamShardHandle(shardMetadata.getStreamName(), newSplitShard2)); } final TestFetcher<String> fetcher = new TestFetcher<>( Collections.singletonList(TEST_STREAM_NAME), new TestSourceContext<>(), new TestRuntimeContext(true, 1, 0), TestUtils.getStandardProperties(), new KinesisDeserializationSchemaWrapper<>(new SimpleStringSchema()), null, initialDiscoveryShards); final 
DummyFlinkKinesisConsumer<String> consumerFunction = new DummyFlinkKinesisConsumer<>( fetcher, new KinesisDeserializationSchemaWrapper<>(new SimpleStringSchema())); StreamSource<String, DummyFlinkKinesisConsumer<String>> consumerOperator = new StreamSource<>(consumerFunction); final AbstractStreamOperatorTestHarness<String> testHarness = new AbstractStreamOperatorTestHarness<>(consumerOperator, 1, 1, 0); testHarness.setup(); testHarness.initializeState( OperatorSnapshotUtil.getResourceFilename( "kinesis-consumer-migration-test-flink" + testMigrateVersion + "-snapshot")); testHarness.open(); consumerFunction.run(new TestSourceContext<>()); // assert that state is correctly restored assertNotEquals(null, consumerFunction.getRestoredState()); assertEquals(1, consumerFunction.getRestoredState().size()); assertEquals(TEST_STATE, removeEquivalenceWrappers(consumerFunction.getRestoredState())); // assert that the fetcher is registered with all shards, including new shards assertEquals(3, fetcher.getSubscribedShardsState().size()); KinesisStreamShardState restoredClosedShardState = fetcher.getSubscribedShardsState().get(0); assertEquals(TEST_STREAM_NAME, restoredClosedShardState.getStreamShardHandle().getStreamName()); assertEquals(TEST_SHARD_ID, restoredClosedShardState.getStreamShardHandle().getShard().getShardId()); assertTrue(restoredClosedShardState.getStreamShardHandle().isClosed()); assertEquals(TEST_SEQUENCE_NUMBER, restoredClosedShardState.getLastProcessedSequenceNum()); KinesisStreamShardState restoredNewSplitShard1 = fetcher.getSubscribedShardsState().get(1); assertEquals(TEST_STREAM_NAME, restoredNewSplitShard1.getStreamShardHandle().getStreamName()); assertEquals(KinesisShardIdGenerator.generateFromShardOrder(1), restoredNewSplitShard1.getStreamShardHandle().getShard().getShardId()); assertFalse(restoredNewSplitShard1.getStreamShardHandle().isClosed()); // new shards should be consumed from the beginning 
assertEquals(SentinelSequenceNumber.SENTINEL_EARLIEST_SEQUENCE_NUM.get(), restoredNewSplitShard1.getLastProcessedSequenceNum()); KinesisStreamShardState restoredNewSplitShard2 = fetcher.getSubscribedShardsState().get(2); assertEquals(TEST_STREAM_NAME, restoredNewSplitShard2.getStreamShardHandle().getStreamName()); assertEquals(KinesisShardIdGenerator.generateFromShardOrder(2), restoredNewSplitShard2.getStreamShardHandle().getShard().getShardId()); assertFalse(restoredNewSplitShard2.getStreamShardHandle().isClosed()); // new shards should be consumed from the beginning assertEquals(SentinelSequenceNumber.SENTINEL_EARLIEST_SEQUENCE_NUM.get(), restoredNewSplitShard2.getLastProcessedSequenceNum()); consumerOperator.close(); consumerOperator.cancel(); }
Example #28
Source File: FlinkKinesisConsumerTest.java From Flink-CEPplus with Apache License 2.0 | 4 votes |
/** * FLINK-8484: ensure that a state change in the StreamShardMetadata other than {@link StreamShardMetadata#shardId} or * {@link StreamShardMetadata#streamName} does not result in the shard not being able to be restored. * This handles the corner case where the stored shard metadata is open (no ending sequence number), but after the * job restore, the shard has been closed (ending number set) due to re-sharding, and we can no longer rely on * {@link StreamShardMetadata#equals(Object)} to find back the sequence number in the collection of restored shard metadata. * <p></p> * Therefore, we will rely on synchronizing the snapshot's state with the Kinesis shard before attempting to find back * the sequence number to restore. */ @Test public void testFindSequenceNumberToRestoreFromIfTheShardHasBeenClosedSinceTheStateWasStored() throws Exception { // ---------------------------------------------------------------------- // setup initial state // ---------------------------------------------------------------------- HashMap<StreamShardHandle, SequenceNumber> fakeRestoredState = getFakeRestoredStore("all"); // ---------------------------------------------------------------------- // mock operator state backend and initial state for initializeState() // ---------------------------------------------------------------------- TestingListState<Tuple2<StreamShardMetadata, SequenceNumber>> listState = new TestingListState<>(); for (Map.Entry<StreamShardHandle, SequenceNumber> state : fakeRestoredState.entrySet()) { listState.add(Tuple2.of(KinesisDataFetcher.convertToStreamShardMetadata(state.getKey()), state.getValue())); } OperatorStateStore operatorStateStore = mock(OperatorStateStore.class); when(operatorStateStore.getUnionListState(Matchers.any(ListStateDescriptor.class))).thenReturn(listState); StateInitializationContext initializationContext = mock(StateInitializationContext.class); when(initializationContext.getOperatorStateStore()).thenReturn(operatorStateStore); 
when(initializationContext.isRestored()).thenReturn(true); // ---------------------------------------------------------------------- // mock fetcher // ---------------------------------------------------------------------- KinesisDataFetcher mockedFetcher = mockKinesisDataFetcher(); List<StreamShardHandle> shards = new ArrayList<>(); // create a fake stream shard handle based on the first entry in the restored state final StreamShardHandle originalStreamShardHandle = fakeRestoredState.keySet().iterator().next(); final StreamShardHandle closedStreamShardHandle = new StreamShardHandle(originalStreamShardHandle.getStreamName(), originalStreamShardHandle.getShard()); // close the shard handle by setting an ending sequence number final SequenceNumberRange sequenceNumberRange = new SequenceNumberRange(); sequenceNumberRange.setEndingSequenceNumber("1293844"); closedStreamShardHandle.getShard().setSequenceNumberRange(sequenceNumberRange); shards.add(closedStreamShardHandle); when(mockedFetcher.discoverNewShardsToSubscribe()).thenReturn(shards); // assume the given config is correct PowerMockito.mockStatic(KinesisConfigUtil.class); PowerMockito.doNothing().when(KinesisConfigUtil.class); // ---------------------------------------------------------------------- // start to test fetcher's initial state seeding // ---------------------------------------------------------------------- TestableFlinkKinesisConsumer consumer = new TestableFlinkKinesisConsumer( "fakeStream", new Properties(), 10, 2); consumer.initializeState(initializationContext); consumer.open(new Configuration()); consumer.run(Mockito.mock(SourceFunction.SourceContext.class)); Mockito.verify(mockedFetcher).registerNewSubscribedShardState( new KinesisStreamShardState(KinesisDataFetcher.convertToStreamShardMetadata(closedStreamShardHandle), closedStreamShardHandle, fakeRestoredState.get(closedStreamShardHandle))); }