org.apache.flink.streaming.connectors.kafka.internals.AbstractPartitionDiscoverer Java Examples
The following examples show how to use org.apache.flink.streaming.connectors.kafka.internals.AbstractPartitionDiscoverer, the base class that Flink's Kafka consumer uses to discover topics and partitions and to decide which of the discovered partitions the current parallel subtask should subscribe to. Each example names its source file and originating project.
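Before the examples, a short orientation may help. The sketch below shows, in a hedged and simplified form, the lifecycle that these examples exercise: construct a discoverer for one parallel subtask, open it, run a round of discovery, and close it. It is not code from the Flink project; the topic name and broker address are hypothetical, and it assumes the open()/discoverPartitions()/wakeup()/close() methods of AbstractPartitionDiscoverer together with the KafkaPartitionDiscoverer constructor that appears in the examples further down.

import java.util.Collections;
import java.util.List;
import java.util.Properties;

import org.apache.flink.streaming.connectors.kafka.internals.AbstractPartitionDiscoverer;
import org.apache.flink.streaming.connectors.kafka.internals.KafkaPartitionDiscoverer;
import org.apache.flink.streaming.connectors.kafka.internals.KafkaTopicPartition;
import org.apache.flink.streaming.connectors.kafka.internals.KafkaTopicsDescriptor;

public class PartitionDiscoverySketch {

	public static void main(String[] args) throws Exception {
		// Hypothetical topic and broker address; adjust for your environment.
		KafkaTopicsDescriptor topicsDescriptor =
				new KafkaTopicsDescriptor(Collections.singletonList("my-topic"), null);
		Properties props = new Properties();
		props.setProperty("bootstrap.servers", "localhost:9092");

		// Subtask 0 of a parallelism-1 job, so it owns every discovered partition.
		AbstractPartitionDiscoverer discoverer =
				new KafkaPartitionDiscoverer(topicsDescriptor, 0, 1, props);
		try {
			discoverer.open();
			// Returns the partitions that are new since the last call and that
			// this subtask is responsible for.
			List<KafkaTopicPartition> newPartitions = discoverer.discoverPartitions();
			System.out.println("Discovered: " + newPartitions);
		} catch (AbstractPartitionDiscoverer.WakeupException e) {
			// Another thread called discoverer.wakeup() to interrupt a blocking call.
		} finally {
			discoverer.close();
		}
	}
}

In the real consumer, discoverPartitions() is invoked repeatedly on the configured discovery interval, and wakeup() interrupts a blocked call during shutdown; the WakeupException handling in Examples #7 and #16 below surfaces exactly that interruption.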
Example #1
Source File: FlinkKafkaConsumerBaseMigrationTest.java From flink with Apache License 2.0

@Override
protected AbstractPartitionDiscoverer createPartitionDiscoverer(
		KafkaTopicsDescriptor topicsDescriptor,
		int indexOfThisSubtask,
		int numParallelSubtasks) {

	AbstractPartitionDiscoverer mockPartitionDiscoverer = mock(AbstractPartitionDiscoverer.class);

	try {
		when(mockPartitionDiscoverer.discoverPartitions()).thenReturn(partitions);
	} catch (Exception e) {
		// ignore
	}
	when(mockPartitionDiscoverer.setAndCheckDiscoveredPartition(any(KafkaTopicPartition.class))).thenReturn(true);

	return mockPartitionDiscoverer;
}
Example #2
Source File: FlinkKafkaConsumerBaseTest.java From flink with Apache License 2.0

/**
 * Tests that no checkpoints happen when the fetcher is not running.
 */
@Test
public void ignoreCheckpointWhenNotRunning() throws Exception {
	@SuppressWarnings("unchecked")
	final MockFetcher<String> fetcher = new MockFetcher<>();
	final FlinkKafkaConsumerBase<String> consumer = new DummyFlinkKafkaConsumer<>(
			fetcher,
			mock(AbstractPartitionDiscoverer.class),
			false);

	final TestingListState<Tuple2<KafkaTopicPartition, Long>> listState = new TestingListState<>();
	setupConsumer(consumer, false, listState, true, 0, 1);

	// snapshot before the fetcher starts running
	consumer.snapshotState(new StateSnapshotContextSynchronousImpl(1, 1));

	// no state should have been checkpointed
	assertFalse(listState.get().iterator().hasNext());

	// acknowledgement of the checkpoint should also not result in any offset commits
	consumer.notifyCheckpointComplete(1L);
	assertNull(fetcher.getAndClearLastCommittedOffsets());
	assertEquals(0, fetcher.getCommitCount());
}
Example #3
Source File: FlinkKafkaConsumerBaseTest.java From flink with Apache License 2.0

@SuppressWarnings("unchecked")
DummyFlinkKafkaConsumer(
		SupplierWithException<AbstractFetcher<T, ?>, Exception> testFetcherSupplier,
		AbstractPartitionDiscoverer testPartitionDiscoverer,
		boolean isAutoCommitEnabled,
		long discoveryIntervalMillis,
		List<String> topics) {
	super(
			topics,
			null,
			(KeyedDeserializationSchema<T>) mock(KeyedDeserializationSchema.class),
			discoveryIntervalMillis,
			false);

	this.testFetcherSupplier = testFetcherSupplier;
	this.testPartitionDiscoverer = testPartitionDiscoverer;
	this.isAutoCommitEnabled = isAutoCommitEnabled;
}
Example #4
Source File: FlinkKafkaConsumerBaseTest.java From Flink-CEPplus with Apache License 2.0

@SuppressWarnings("unchecked")
DummyFlinkKafkaConsumer(
		SupplierWithException<AbstractFetcher<T, ?>, Exception> testFetcherSupplier,
		AbstractPartitionDiscoverer testPartitionDiscoverer,
		boolean isAutoCommitEnabled,
		long discoveryIntervalMillis,
		List<String> topics) {
	super(
			topics,
			null,
			(KeyedDeserializationSchema<T>) mock(KeyedDeserializationSchema.class),
			discoveryIntervalMillis,
			false);

	this.testFetcherSupplier = testFetcherSupplier;
	this.testPartitionDiscoverer = testPartitionDiscoverer;
	this.isAutoCommitEnabled = isAutoCommitEnabled;
}
Example #5
Source File: FlinkKafkaConsumerBaseTest.java From Flink-CEPplus with Apache License 2.0

/**
 * Tests that no checkpoints happen when the fetcher is not running.
 */
@Test
public void ignoreCheckpointWhenNotRunning() throws Exception {
	@SuppressWarnings("unchecked")
	final MockFetcher<String> fetcher = new MockFetcher<>();
	final FlinkKafkaConsumerBase<String> consumer = new DummyFlinkKafkaConsumer<>(
			fetcher,
			mock(AbstractPartitionDiscoverer.class),
			false);

	final TestingListState<Tuple2<KafkaTopicPartition, Long>> listState = new TestingListState<>();
	setupConsumer(consumer, false, listState, true, 0, 1);

	// snapshot before the fetcher starts running
	consumer.snapshotState(new StateSnapshotContextSynchronousImpl(1, 1));

	// no state should have been checkpointed
	assertFalse(listState.get().iterator().hasNext());

	// acknowledgement of the checkpoint should also not result in any offset commits
	consumer.notifyCheckpointComplete(1L);
	assertNull(fetcher.getAndClearLastCommittedOffsets());
	assertEquals(0, fetcher.getCommitCount());
}
Example #6
Source File: FlinkKafkaConsumerBaseMigrationTest.java From Flink-CEPplus with Apache License 2.0

@Override
protected AbstractPartitionDiscoverer createPartitionDiscoverer(
		KafkaTopicsDescriptor topicsDescriptor,
		int indexOfThisSubtask,
		int numParallelSubtasks) {

	AbstractPartitionDiscoverer mockPartitionDiscoverer = mock(AbstractPartitionDiscoverer.class);

	try {
		when(mockPartitionDiscoverer.discoverPartitions()).thenReturn(partitions);
	} catch (Exception e) {
		// ignore
	}
	when(mockPartitionDiscoverer.setAndCheckDiscoveredPartition(any(KafkaTopicPartition.class))).thenReturn(true);

	return mockPartitionDiscoverer;
}
Example #7
Source File: KafkaPartitionDiscoverer.java From Flink-CEPplus with Apache License 2.0

@Override
protected List<KafkaTopicPartition> getAllPartitionsForTopics(List<String> topics) throws AbstractPartitionDiscoverer.WakeupException {
	List<KafkaTopicPartition> partitions = new LinkedList<>();

	try {
		for (String topic : topics) {
			for (PartitionInfo partitionInfo : kafkaConsumer.partitionsFor(topic)) {
				partitions.add(new KafkaTopicPartition(partitionInfo.topic(), partitionInfo.partition()));
			}
		}
	} catch (org.apache.kafka.common.errors.WakeupException e) {
		// rethrow our own wakeup exception
		throw new AbstractPartitionDiscoverer.WakeupException();
	}

	return partitions;
}
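The example above fills in one discovery hook of AbstractPartitionDiscoverer. To show how the hooks fit together, here is a hedged sketch of a complete minimal subclass that answers discovery requests from a fixed, in-memory partition list. StaticPartitionDiscoverer is invented for illustration, and the five overridden methods are assumed to be the abstract hooks of this connector version; other Flink releases may differ.

import java.util.List;
import java.util.stream.Collectors;

import org.apache.flink.streaming.connectors.kafka.internals.AbstractPartitionDiscoverer;
import org.apache.flink.streaming.connectors.kafka.internals.KafkaTopicPartition;
import org.apache.flink.streaming.connectors.kafka.internals.KafkaTopicsDescriptor;

// Hedged sketch of a minimal custom discoverer serving a fixed partition list.
public class StaticPartitionDiscoverer extends AbstractPartitionDiscoverer {

	private final List<KafkaTopicPartition> fixedPartitions;

	public StaticPartitionDiscoverer(
			KafkaTopicsDescriptor topicsDescriptor,
			int indexOfThisSubtask,
			int numParallelSubtasks,
			List<KafkaTopicPartition> fixedPartitions) {
		super(topicsDescriptor, indexOfThisSubtask, numParallelSubtasks);
		this.fixedPartitions = fixedPartitions;
	}

	@Override
	protected void initializeConnections() {
		// nothing to connect to; a real discoverer would open a metadata client here
	}

	@Override
	protected List<String> getAllTopics() {
		return fixedPartitions.stream()
				.map(KafkaTopicPartition::getTopic)
				.distinct()
				.collect(Collectors.toList());
	}

	@Override
	protected List<KafkaTopicPartition> getAllPartitionsForTopics(List<String> topics) {
		return fixedPartitions.stream()
				.filter(partition -> topics.contains(partition.getTopic()))
				.collect(Collectors.toList());
	}

	@Override
	protected void wakeupConnections() {
		// a real discoverer would interrupt any blocking metadata call here
	}

	@Override
	protected void closeConnections() {
		// nothing to close for a static list
	}
}

Because everything is served from memory, the three connection hooks are no-ops here; a real discoverer, like the KafkaConsumer-backed one above, does its blocking work in the two getAll* methods and uses wakeupConnections to interrupt it.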
Example #8
Source File: FlinkKafkaConsumerBaseTest.java From flink with Apache License 2.0

@SuppressWarnings("unchecked")
DummyFlinkKafkaConsumer(
		SupplierWithException<AbstractFetcher<T, ?>, Exception> testFetcherSupplier,
		AbstractPartitionDiscoverer testPartitionDiscoverer,
		boolean isAutoCommitEnabled,
		long discoveryIntervalMillis,
		List<String> topics,
		KafkaDeserializationSchema<T> mock) {
	super(
			topics,
			null,
			mock,
			discoveryIntervalMillis,
			false);

	this.testFetcherSupplier = testFetcherSupplier;
	this.testPartitionDiscoverer = testPartitionDiscoverer;
	this.isAutoCommitEnabled = isAutoCommitEnabled;
}
Example #9
Source File: FlinkKafkaConsumerBaseTest.java From flink with Apache License 2.0

private void checkFilterRestoredPartitionsWithDiscovered(
		List<String> restoredKafkaTopics,
		List<String> initKafkaTopics,
		List<String> expectedSubscribedPartitions,
		Boolean disableFiltering) throws Exception {
	final AbstractPartitionDiscoverer discoverer = new TestPartitionDiscoverer(
			new KafkaTopicsDescriptor(initKafkaTopics, null),
			0,
			1,
			TestPartitionDiscoverer.createMockGetAllTopicsSequenceFromFixedReturn(initKafkaTopics),
			TestPartitionDiscoverer.createMockGetAllPartitionsFromTopicsSequenceFromFixedReturn(
					initKafkaTopics.stream()
							.map(topic -> new KafkaTopicPartition(topic, 0))
							.collect(Collectors.toList())));
	final FlinkKafkaConsumerBase<String> consumer = new DummyFlinkKafkaConsumer<>(initKafkaTopics, discoverer);
	if (disableFiltering) {
		consumer.disableFilterRestoredPartitionsWithSubscribedTopics();
	}

	final TestingListState<Tuple2<KafkaTopicPartition, Long>> listState = new TestingListState<>();

	for (int i = 0; i < restoredKafkaTopics.size(); i++) {
		listState.add(new Tuple2<>(new KafkaTopicPartition(restoredKafkaTopics.get(i), 0), 12345L));
	}

	setupConsumer(consumer, true, listState, true, 0, 1);

	Map<KafkaTopicPartition, Long> subscribedPartitionsToStartOffsets = consumer.getSubscribedPartitionsToStartOffsets();

	assertEquals(
			new HashSet<>(expectedSubscribedPartitions),
			subscribedPartitionsToStartOffsets
					.keySet()
					.stream()
					.map(partition -> partition.getTopic())
					.collect(Collectors.toSet()));
}
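To make the helper's contract concrete, here are two hypothetical invocations (topic names and expected outcomes are constructed for illustration, not taken from the Flink test suite, and java.util.Arrays and java.util.Collections are assumed to be imported):

// Hypothetical invocations of the helper above; topic names are made up.
// Filtering enabled (last argument false): the restored partition of "topic-b"
// is dropped because "topic-b" is no longer among the subscribed topics.
checkFilterRestoredPartitionsWithDiscovered(
		Arrays.asList("topic-a", "topic-b"),   // topics restored from checkpoint state
		Collections.singletonList("topic-a"),  // topics currently subscribed
		Collections.singletonList("topic-a"),  // expected topics after filtering
		false);

// Filtering disabled (last argument true): all restored partitions are kept.
checkFilterRestoredPartitionsWithDiscovered(
		Arrays.asList("topic-a", "topic-b"),
		Collections.singletonList("topic-a"),
		Arrays.asList("topic-a", "topic-b"),
		true);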
Example #10
Source File: FlinkKafkaConsumerBaseTest.java From flink with Apache License 2.0

@SuppressWarnings("unchecked")
DummyFlinkKafkaConsumer(
		SupplierWithException<AbstractFetcher<T, ?>, Exception> testFetcherSupplier,
		AbstractPartitionDiscoverer testPartitionDiscoverer,
		boolean isAutoCommitEnabled,
		long discoveryIntervalMillis) {
	this(
			testFetcherSupplier,
			testPartitionDiscoverer,
			isAutoCommitEnabled,
			discoveryIntervalMillis,
			Collections.singletonList("dummy-topic"),
			(KeyedDeserializationSchema<T>) mock(KeyedDeserializationSchema.class));
}
Example #11
Source File: FlinkKafkaConsumerBaseTest.java From flink with Apache License 2.0

@SuppressWarnings("unchecked")
DummyFlinkKafkaConsumer(
		AbstractFetcher<T, ?> testFetcher,
		AbstractPartitionDiscoverer testPartitionDiscoverer,
		boolean isAutoCommitEnabled,
		long discoveryIntervalMillis) {
	this(
			() -> testFetcher,
			testPartitionDiscoverer,
			isAutoCommitEnabled,
			discoveryIntervalMillis);
}
Example #12
Source File: FlinkKafkaConsumerBaseTest.java From flink with Apache License 2.0

@SuppressWarnings("unchecked")
DummyFlinkKafkaConsumer(
		SupplierWithException<AbstractFetcher<T, ?>, Exception> abstractFetcherSupplier,
		AbstractPartitionDiscoverer abstractPartitionDiscoverer,
		long discoveryIntervalMillis) {
	this(
			abstractFetcherSupplier,
			abstractPartitionDiscoverer,
			false,
			discoveryIntervalMillis);
}
Example #13
Source File: FlinkKafkaConsumerBaseTest.java From flink with Apache License 2.0

@SuppressWarnings("unchecked")
DummyFlinkKafkaConsumer(List<String> topics, AbstractPartitionDiscoverer abstractPartitionDiscoverer) {
	this(
			() -> mock(AbstractFetcher.class),
			abstractPartitionDiscoverer,
			false,
			PARTITION_DISCOVERY_DISABLED,
			topics,
			(KeyedDeserializationSchema<T>) mock(KeyedDeserializationSchema.class));
}
Example #14
Source File: FlinkKafkaConsumerBaseTest.java From flink with Apache License 2.0

@SuppressWarnings("unchecked")
DummyFlinkKafkaConsumer(KafkaDeserializationSchema<T> kafkaDeserializationSchema) {
	this(
			() -> mock(AbstractFetcher.class),
			mock(AbstractPartitionDiscoverer.class),
			false,
			PARTITION_DISCOVERY_DISABLED,
			Collections.singletonList("dummy-topic"),
			kafkaDeserializationSchema);
}
Example #15
Source File: FlinkKafkaConsumer08.java From Flink-CEPplus with Apache License 2.0

@Override
protected AbstractPartitionDiscoverer createPartitionDiscoverer(
		KafkaTopicsDescriptor topicsDescriptor,
		int indexOfThisSubtask,
		int numParallelSubtasks) {
	return new Kafka08PartitionDiscoverer(topicsDescriptor, indexOfThisSubtask, numParallelSubtasks, kafkaProperties);
}
Example #16
Source File: KafkaPartitionDiscoverer.java From flink with Apache License 2.0

@Override
protected List<String> getAllTopics() throws AbstractPartitionDiscoverer.WakeupException {
	try {
		return new ArrayList<>(kafkaConsumer.listTopics().keySet());
	} catch (org.apache.kafka.common.errors.WakeupException e) {
		// rethrow our own wakeup exception
		throw new AbstractPartitionDiscoverer.WakeupException();
	}
}
Example #17
Source File: FlinkKafkaConsumer010.java From flink with Apache License 2.0

@Override
protected AbstractPartitionDiscoverer createPartitionDiscoverer(
		KafkaTopicsDescriptor topicsDescriptor,
		int indexOfThisSubtask,
		int numParallelSubtasks) {
	return new Kafka010PartitionDiscoverer(topicsDescriptor, indexOfThisSubtask, numParallelSubtasks, properties);
}
Example #18
Source File: FlinkKafkaConsumerBaseTest.java From flink with Apache License 2.0

@SuppressWarnings("unchecked")
DummyFlinkKafkaConsumer(
		AbstractFetcher<T, ?> testFetcher,
		AbstractPartitionDiscoverer testPartitionDiscoverer,
		boolean isAutoCommitEnabled) {
	this(
			testFetcher,
			testPartitionDiscoverer,
			isAutoCommitEnabled,
			PARTITION_DISCOVERY_DISABLED);
}
Example #19
Source File: FlinkKafkaConsumerBaseTest.java From flink with Apache License 2.0

TestingFlinkKafkaConsumer(final AbstractPartitionDiscoverer partitionDiscoverer, long discoveryIntervalMillis) {
	super(
			Collections.singletonList("dummy-topic"),
			null,
			(KafkaDeserializationSchema<T>) mock(KafkaDeserializationSchema.class),
			discoveryIntervalMillis,
			false);
	this.partitionDiscoverer = partitionDiscoverer;
}
Example #20
Source File: FlinkKafkaConsumerBaseTest.java From flink with Apache License 2.0

@Override
protected AbstractPartitionDiscoverer createPartitionDiscoverer(
		KafkaTopicsDescriptor topicsDescriptor,
		int indexOfThisSubtask,
		int numParallelSubtasks) {
	return this.testPartitionDiscoverer;
}
Example #21
Source File: FlinkKafkaConsumerBaseTest.java From flink with Apache License 2.0

@SuppressWarnings("unchecked")
DummyFlinkKafkaConsumer(
		SupplierWithException<AbstractFetcher<T, ?>, Exception> testFetcherSupplier,
		AbstractPartitionDiscoverer testPartitionDiscoverer,
		boolean isAutoCommitEnabled,
		long discoveryIntervalMillis) {
	this(
			testFetcherSupplier,
			testPartitionDiscoverer,
			isAutoCommitEnabled,
			discoveryIntervalMillis,
			Collections.singletonList("dummy-topic"));
}
Example #22
Source File: FlinkKafkaConsumerBaseTest.java From flink with Apache License 2.0

@SuppressWarnings("unchecked")
DummyFlinkKafkaConsumer(List<String> topics, AbstractPartitionDiscoverer abstractPartitionDiscoverer) {
	this(
			() -> mock(AbstractFetcher.class),
			abstractPartitionDiscoverer,
			false,
			PARTITION_DISCOVERY_DISABLED,
			topics);
}
Example #23
Source File: FlinkKafkaConsumer.java From flink with Apache License 2.0

@Override
protected AbstractPartitionDiscoverer createPartitionDiscoverer(
		KafkaTopicsDescriptor topicsDescriptor,
		int indexOfThisSubtask,
		int numParallelSubtasks) {
	return new KafkaPartitionDiscoverer(topicsDescriptor, indexOfThisSubtask, numParallelSubtasks, properties);
}