org.apache.flink.streaming.connectors.kafka.internals.KafkaTopicPartition Java Examples
The following examples show how to use
org.apache.flink.streaming.connectors.kafka.internals.KafkaTopicPartition.
Each example notes the original project and source file it was taken from.
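KafkaTopicPartition is a small, serializable topic/partition identifier that the Flink Kafka connector uses as a map key, most commonly in a Map<KafkaTopicPartition, Long> of specific startup offsets. The following minimal sketch illustrates that pattern; the topic name, partition numbers, offsets, and broker properties are made up for illustration, and the needed imports (java.util.*, SimpleStringSchema, FlinkKafkaConsumer, KafkaTopicPartition) are assumed to be present.

// Illustrative sketch only: topic, partitions, and offsets are hypothetical.
private static FlinkKafkaConsumer<String> createConsumerWithSpecificOffsets() {
    Properties properties = new Properties();
    properties.setProperty("bootstrap.servers", "localhost:9092");
    properties.setProperty("group.id", "example-group");

    // KafkaTopicPartition(topic, partition) keys map to the offset each partition should start from.
    Map<KafkaTopicPartition, Long> specificStartupOffsets = new HashMap<>();
    specificStartupOffsets.put(new KafkaTopicPartition("my-topic", 0), 23L);
    specificStartupOffsets.put(new KafkaTopicPartition("my-topic", 1), 31L);

    FlinkKafkaConsumer<String> consumer =
            new FlinkKafkaConsumer<>("my-topic", new SimpleStringSchema(), properties);

    // Start each listed partition from its given offset instead of the committed group offsets.
    consumer.setStartFromSpecificOffsets(specificStartupOffsets);
    return consumer;
}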
Example #1
Source File: FlinkKafkaConsumerBaseMigrationTest.java From flink with Apache License 2.0 | 6 votes |
@SuppressWarnings("unchecked")
DummyFlinkKafkaConsumer(
        AbstractFetcher<T, ?> fetcher,
        List<String> topics,
        List<KafkaTopicPartition> partitions,
        long discoveryInterval) {

    super(
        topics,
        null,
        (KafkaDeserializationSchema<T>) mock(KafkaDeserializationSchema.class),
        discoveryInterval,
        false);

    this.fetcher = fetcher;
    this.partitions = partitions;
}
Example #2
Source File: KafkaTableSourceSinkFactory.java From flink with Apache License 2.0 | 6 votes |
@Override
protected KafkaTableSourceBase createKafkaTableSource(
        TableSchema schema,
        Optional<String> proctimeAttribute,
        List<RowtimeAttributeDescriptor> rowtimeAttributeDescriptors,
        Map<String, String> fieldMapping,
        String topic,
        Properties properties,
        DeserializationSchema<Row> deserializationSchema,
        StartupMode startupMode,
        Map<KafkaTopicPartition, Long> specificStartupOffsets,
        long startupTimestampMillis) {
    return new KafkaTableSource(
        schema,
        proctimeAttribute,
        rowtimeAttributeDescriptors,
        Optional.of(fieldMapping),
        topic,
        properties,
        deserializationSchema,
        startupMode,
        specificStartupOffsets,
        startupTimestampMillis);
}
Example #3
Source File: KafkaConsumerTestBase.java From Flink-CEPplus with Apache License 2.0 | 6 votes |
/**
 * Variant of {@link KafkaConsumerTestBase#readSequence(StreamExecutionEnvironment, StartupMode, Map, Long, Properties, String, Map)}
 * to expect reading from the same start offset and the same value count for all partitions of a single Kafka topic.
 */
protected void readSequence(final StreamExecutionEnvironment env,
                            final StartupMode startupMode,
                            final Map<KafkaTopicPartition, Long> specificStartupOffsets,
                            final Long startupTimestamp,
                            final Properties cc,
                            final int sourceParallelism,
                            final String topicName,
                            final int valuesCount,
                            final int startFrom) throws Exception {
    HashMap<Integer, Tuple2<Integer, Integer>> partitionsToValuesCountAndStartOffset = new HashMap<>();
    for (int i = 0; i < sourceParallelism; i++) {
        partitionsToValuesCountAndStartOffset.put(i, new Tuple2<>(valuesCount, startFrom));
    }
    readSequence(env, startupMode, specificStartupOffsets, startupTimestamp, cc, topicName, partitionsToValuesCountAndStartOffset);
}
Example #4
Source File: Kafka010TableSourceSinkFactoryTest.java From flink with Apache License 2.0 | 6 votes |
@Override
protected KafkaTableSourceBase getExpectedKafkaTableSource(
        TableSchema schema,
        Optional<String> proctimeAttribute,
        List<RowtimeAttributeDescriptor> rowtimeAttributeDescriptors,
        Map<String, String> fieldMapping,
        String topic,
        Properties properties,
        DeserializationSchema<Row> deserializationSchema,
        StartupMode startupMode,
        Map<KafkaTopicPartition, Long> specificStartupOffsets) {
    return new Kafka010TableSource(
        schema,
        proctimeAttribute,
        rowtimeAttributeDescriptors,
        Optional.of(fieldMapping),
        topic,
        properties,
        deserializationSchema,
        startupMode,
        specificStartupOffsets);
}
Example #5
Source File: Kafka011TableSourceSinkFactoryTest.java From flink with Apache License 2.0 | 6 votes |
@Override
protected KafkaTableSourceBase getExpectedKafkaTableSource(
        TableSchema schema,
        Optional<String> proctimeAttribute,
        List<RowtimeAttributeDescriptor> rowtimeAttributeDescriptors,
        Map<String, String> fieldMapping,
        String topic,
        Properties properties,
        DeserializationSchema<Row> deserializationSchema,
        StartupMode startupMode,
        Map<KafkaTopicPartition, Long> specificStartupOffsets,
        long startupTimestampMillis) {
    return new Kafka011TableSource(
        schema,
        proctimeAttribute,
        rowtimeAttributeDescriptors,
        Optional.of(fieldMapping),
        topic,
        properties,
        deserializationSchema,
        startupMode,
        specificStartupOffsets,
        startupTimestampMillis);
}
Example #6
Source File: FlinkKafkaConsumerBaseMigrationTest.java From flink with Apache License 2.0 | 6 votes |
@Override
protected AbstractPartitionDiscoverer createPartitionDiscoverer(
        KafkaTopicsDescriptor topicsDescriptor,
        int indexOfThisSubtask,
        int numParallelSubtasks) {

    AbstractPartitionDiscoverer mockPartitionDiscoverer = mock(AbstractPartitionDiscoverer.class);

    try {
        when(mockPartitionDiscoverer.discoverPartitions()).thenReturn(partitions);
    } catch (Exception e) {
        // ignore
    }
    when(mockPartitionDiscoverer.setAndCheckDiscoveredPartition(any(KafkaTopicPartition.class))).thenReturn(true);

    return mockPartitionDiscoverer;
}
Example #7
Source File: Kafka09TableSourceSinkFactory.java From Flink-CEPplus with Apache License 2.0 | 6 votes |
@Override
protected KafkaTableSourceBase createKafkaTableSource(
        TableSchema schema,
        Optional<String> proctimeAttribute,
        List<RowtimeAttributeDescriptor> rowtimeAttributeDescriptors,
        Map<String, String> fieldMapping,
        String topic,
        Properties properties,
        DeserializationSchema<Row> deserializationSchema,
        StartupMode startupMode,
        Map<KafkaTopicPartition, Long> specificStartupOffsets) {
    return new Kafka09TableSource(
        schema,
        proctimeAttribute,
        rowtimeAttributeDescriptors,
        Optional.of(fieldMapping),
        topic,
        properties,
        deserializationSchema,
        startupMode,
        specificStartupOffsets);
}
Example #8
Source File: Kafka010DynamicSource.java From flink with Apache License 2.0 | 6 votes |
/**
 * Creates a Kafka 0.10 {@link StreamTableSource}.
 *
 * @param outputDataType         Source output data type
 * @param topic                  Kafka topic to consume
 * @param properties             Properties for the Kafka consumer
 * @param decodingFormat         Decoding format for decoding records from Kafka
 * @param startupMode            Startup mode for the contained consumer
 * @param specificStartupOffsets Specific startup offsets; only relevant when startup
 *                               mode is {@link StartupMode#SPECIFIC_OFFSETS}
 * @param startupTimestampMillis Startup timestamp for offsets; only relevant when startup
 *                               mode is {@link StartupMode#TIMESTAMP}
 */
public Kafka010DynamicSource(
        DataType outputDataType,
        String topic,
        Properties properties,
        DecodingFormat<DeserializationSchema<RowData>> decodingFormat,
        StartupMode startupMode,
        Map<KafkaTopicPartition, Long> specificStartupOffsets,
        long startupTimestampMillis) {

    super(
        outputDataType,
        topic,
        properties,
        decodingFormat,
        startupMode,
        specificStartupOffsets,
        startupTimestampMillis);
}
Example #9
Source File: KafkaTableSourceBase.java From flink with Apache License 2.0 | 6 votes |
/**
 * Creates a generic Kafka {@link StreamTableSource}.
 *
 * @param schema                      Schema of the produced table.
 * @param proctimeAttribute           Field name of the processing time attribute.
 * @param rowtimeAttributeDescriptors Descriptor for a rowtime attribute.
 * @param fieldMapping                Mapping for the fields of the table schema to
 *                                    fields of the physical returned type.
 * @param topic                       Kafka topic to consume.
 * @param properties                  Properties for the Kafka consumer.
 * @param deserializationSchema       Deserialization schema for decoding records from Kafka.
 * @param startupMode                 Startup mode for the contained consumer.
 * @param specificStartupOffsets      Specific startup offsets; only relevant when startup
 *                                    mode is {@link StartupMode#SPECIFIC_OFFSETS}.
 * @param startupTimestampMillis      Startup timestamp for offsets; only relevant when startup
 *                                    mode is {@link StartupMode#TIMESTAMP}.
 */
protected KafkaTableSourceBase(
        TableSchema schema,
        Optional<String> proctimeAttribute,
        List<RowtimeAttributeDescriptor> rowtimeAttributeDescriptors,
        Optional<Map<String, String>> fieldMapping,
        String topic,
        Properties properties,
        DeserializationSchema<Row> deserializationSchema,
        StartupMode startupMode,
        Map<KafkaTopicPartition, Long> specificStartupOffsets,
        long startupTimestampMillis) {
    this.schema = TableSchemaUtils.checkNoGeneratedColumns(schema);
    this.proctimeAttribute = validateProctimeAttribute(proctimeAttribute);
    this.rowtimeAttributeDescriptors = validateRowtimeAttributeDescriptors(rowtimeAttributeDescriptors);
    this.fieldMapping = fieldMapping;
    this.topic = Preconditions.checkNotNull(topic, "Topic must not be null.");
    this.properties = Preconditions.checkNotNull(properties, "Properties must not be null.");
    this.deserializationSchema = Preconditions.checkNotNull(
        deserializationSchema, "Deserialization schema must not be null.");
    this.startupMode = Preconditions.checkNotNull(startupMode, "Startup mode must not be null.");
    this.specificStartupOffsets = Preconditions.checkNotNull(
        specificStartupOffsets, "Specific offsets must not be null.");
    this.startupTimestampMillis = startupTimestampMillis;
}
Example #10
Source File: KafkaDynamicSource.java From flink with Apache License 2.0 | 6 votes |
/**
 * Creates a generic Kafka {@link StreamTableSource}.
 *
 * @param outputDataType         Source output data type
 * @param topic                  Kafka topic to consume
 * @param properties             Properties for the Kafka consumer
 * @param decodingFormat         Decoding format for decoding records from Kafka
 * @param startupMode            Startup mode for the contained consumer
 * @param specificStartupOffsets Specific startup offsets; only relevant when startup
 *                               mode is {@link StartupMode#SPECIFIC_OFFSETS}
 */
public KafkaDynamicSource(
        DataType outputDataType,
        String topic,
        Properties properties,
        DecodingFormat<DeserializationSchema<RowData>> decodingFormat,
        StartupMode startupMode,
        Map<KafkaTopicPartition, Long> specificStartupOffsets,
        long startupTimestampMillis) {

    super(
        outputDataType,
        topic,
        properties,
        decodingFormat,
        startupMode,
        specificStartupOffsets,
        startupTimestampMillis);
}
Example #11
Source File: FlinkKafkaConsumerBaseTest.java From Flink-CEPplus with Apache License 2.0 | 6 votes |
/**
 * Tests that no checkpoints happen when the fetcher is not running.
 */
@Test
public void ignoreCheckpointWhenNotRunning() throws Exception {
    @SuppressWarnings("unchecked")
    final MockFetcher<String> fetcher = new MockFetcher<>();
    final FlinkKafkaConsumerBase<String> consumer = new DummyFlinkKafkaConsumer<>(
        fetcher,
        mock(AbstractPartitionDiscoverer.class),
        false);

    final TestingListState<Tuple2<KafkaTopicPartition, Long>> listState = new TestingListState<>();
    setupConsumer(consumer, false, listState, true, 0, 1);

    // snapshot before the fetcher starts running
    consumer.snapshotState(new StateSnapshotContextSynchronousImpl(1, 1));

    // no state should have been checkpointed
    assertFalse(listState.get().iterator().hasNext());

    // acknowledgement of the checkpoint should also not result in any offset commits
    consumer.notifyCheckpointComplete(1L);
    assertNull(fetcher.getAndClearLastCommittedOffsets());
    assertEquals(0, fetcher.getCommitCount());
}
Example #12
Source File: Kafka09PartitionDiscoverer.java From Flink-CEPplus with Apache License 2.0 | 6 votes |
@Override
protected List<KafkaTopicPartition> getAllPartitionsForTopics(List<String> topics) throws WakeupException {
    List<KafkaTopicPartition> partitions = new LinkedList<>();

    try {
        for (String topic : topics) {
            for (PartitionInfo partitionInfo : kafkaConsumer.partitionsFor(topic)) {
                partitions.add(new KafkaTopicPartition(partitionInfo.topic(), partitionInfo.partition()));
            }
        }
    } catch (org.apache.kafka.common.errors.WakeupException e) {
        // rethrow our own wakeup exception
        throw new WakeupException();
    }

    return partitions;
}
Example #13
Source File: Kafka011TableSourceSinkFactoryTest.java From Flink-CEPplus with Apache License 2.0 | 6 votes |
@Override
protected KafkaTableSourceBase getExpectedKafkaTableSource(
        TableSchema schema,
        Optional<String> proctimeAttribute,
        List<RowtimeAttributeDescriptor> rowtimeAttributeDescriptors,
        Map<String, String> fieldMapping,
        String topic,
        Properties properties,
        DeserializationSchema<Row> deserializationSchema,
        StartupMode startupMode,
        Map<KafkaTopicPartition, Long> specificStartupOffsets) {
    return new Kafka011TableSource(
        schema,
        proctimeAttribute,
        rowtimeAttributeDescriptors,
        Optional.of(fieldMapping),
        topic,
        properties,
        deserializationSchema,
        startupMode,
        specificStartupOffsets);
}
Example #14
Source File: Kafka011TableSource.java From Flink-CEPplus with Apache License 2.0 | 6 votes |
/**
 * Creates a Kafka 0.11 {@link StreamTableSource}.
 *
 * @param schema                      Schema of the produced table.
 * @param proctimeAttribute           Field name of the processing time attribute.
 * @param rowtimeAttributeDescriptors Descriptor for a rowtime attribute.
 * @param fieldMapping                Mapping for the fields of the table schema to
 *                                    fields of the physical returned type.
 * @param topic                       Kafka topic to consume.
 * @param properties                  Properties for the Kafka consumer.
 * @param deserializationSchema       Deserialization schema for decoding records from Kafka.
 * @param startupMode                 Startup mode for the contained consumer.
 * @param specificStartupOffsets      Specific startup offsets; only relevant when startup
 *                                    mode is {@link StartupMode#SPECIFIC_OFFSETS}.
 */
public Kafka011TableSource(
        TableSchema schema,
        Optional<String> proctimeAttribute,
        List<RowtimeAttributeDescriptor> rowtimeAttributeDescriptors,
        Optional<Map<String, String>> fieldMapping,
        String topic,
        Properties properties,
        DeserializationSchema<Row> deserializationSchema,
        StartupMode startupMode,
        Map<KafkaTopicPartition, Long> specificStartupOffsets) {

    super(
        schema,
        proctimeAttribute,
        rowtimeAttributeDescriptors,
        fieldMapping,
        topic,
        properties,
        deserializationSchema,
        startupMode,
        specificStartupOffsets);
}
Example #15
Source File: Kafka011TableSourceSinkFactory.java From Flink-CEPplus with Apache License 2.0 | 6 votes |
@Override
protected KafkaTableSourceBase createKafkaTableSource(
        TableSchema schema,
        Optional<String> proctimeAttribute,
        List<RowtimeAttributeDescriptor> rowtimeAttributeDescriptors,
        Map<String, String> fieldMapping,
        String topic,
        Properties properties,
        DeserializationSchema<Row> deserializationSchema,
        StartupMode startupMode,
        Map<KafkaTopicPartition, Long> specificStartupOffsets) {
    return new Kafka011TableSource(
        schema,
        proctimeAttribute,
        rowtimeAttributeDescriptors,
        Optional.of(fieldMapping),
        topic,
        properties,
        deserializationSchema,
        startupMode,
        specificStartupOffsets);
}
Example #16
Source File: KafkaPartitionDiscoverer.java From flink with Apache License 2.0 | 6 votes |
@Override
protected List<KafkaTopicPartition> getAllPartitionsForTopics(List<String> topics) throws WakeupException, RuntimeException {
    final List<KafkaTopicPartition> partitions = new LinkedList<>();

    try {
        for (String topic : topics) {
            final List<PartitionInfo> kafkaPartitions = kafkaConsumer.partitionsFor(topic);

            if (kafkaPartitions == null) {
                // use the static String.format so the message actually contains the topic name
                throw new RuntimeException(
                    String.format("Could not fetch partitions for %s. Make sure that the topic exists.", topic));
            }

            for (PartitionInfo partitionInfo : kafkaPartitions) {
                partitions.add(new KafkaTopicPartition(partitionInfo.topic(), partitionInfo.partition()));
            }
        }
    } catch (org.apache.kafka.common.errors.WakeupException e) {
        // rethrow our own wakeup exception
        throw new WakeupException();
    }

    return partitions;
}
Example #17
Source File: KafkaDynamicSourceBase.java From flink with Apache License 2.0 | 6 votes |
/**
 * Creates a generic Kafka {@link StreamTableSource}.
 *
 * @param outputDataType         Source produced data type.
 * @param topic                  Kafka topic to consume.
 * @param properties             Properties for the Kafka consumer.
 * @param decodingFormat         Decoding format for decoding records from Kafka.
 * @param startupMode            Startup mode for the contained consumer.
 * @param specificStartupOffsets Specific startup offsets; only relevant when startup
 *                               mode is {@link StartupMode#SPECIFIC_OFFSETS}.
 * @param startupTimestampMillis Startup timestamp for offsets; only relevant when startup
 *                               mode is {@link StartupMode#TIMESTAMP}.
 */
protected KafkaDynamicSourceBase(
        DataType outputDataType,
        String topic,
        Properties properties,
        DecodingFormat<DeserializationSchema<RowData>> decodingFormat,
        StartupMode startupMode,
        Map<KafkaTopicPartition, Long> specificStartupOffsets,
        long startupTimestampMillis) {
    this.outputDataType = Preconditions.checkNotNull(
        outputDataType, "Produced data type must not be null.");
    this.topic = Preconditions.checkNotNull(topic, "Topic must not be null.");
    this.properties = Preconditions.checkNotNull(properties, "Properties must not be null.");
    this.decodingFormat = Preconditions.checkNotNull(
        decodingFormat, "Decoding format must not be null.");
    this.startupMode = Preconditions.checkNotNull(startupMode, "Startup mode must not be null.");
    this.specificStartupOffsets = Preconditions.checkNotNull(
        specificStartupOffsets, "Specific offsets must not be null.");
    this.startupTimestampMillis = startupTimestampMillis;
}
Example #18
Source File: FlinkKafkaConsumerBaseMigrationTest.java From Flink-CEPplus with Apache License 2.0 | 6 votes |
@SuppressWarnings("unchecked")
DummyFlinkKafkaConsumer(
        AbstractFetcher<T, ?> fetcher,
        List<String> topics,
        List<KafkaTopicPartition> partitions,
        long discoveryInterval) {

    super(
        topics,
        null,
        (KafkaDeserializationSchema<T>) mock(KafkaDeserializationSchema.class),
        discoveryInterval,
        false);

    this.fetcher = fetcher;
    this.partitions = partitions;
}
Example #19
Source File: Kafka08TableSourceSinkFactoryTest.java From Flink-CEPplus with Apache License 2.0 | 6 votes |
@Override
protected KafkaTableSourceBase getExpectedKafkaTableSource(
        TableSchema schema,
        Optional<String> proctimeAttribute,
        List<RowtimeAttributeDescriptor> rowtimeAttributeDescriptors,
        Map<String, String> fieldMapping,
        String topic,
        Properties properties,
        DeserializationSchema<Row> deserializationSchema,
        StartupMode startupMode,
        Map<KafkaTopicPartition, Long> specificStartupOffsets) {
    return new Kafka08TableSource(
        schema,
        proctimeAttribute,
        rowtimeAttributeDescriptors,
        Optional.of(fieldMapping),
        topic,
        properties,
        deserializationSchema,
        startupMode,
        specificStartupOffsets);
}
Example #20
Source File: Kafka08TableSourceSinkFactory.java From Flink-CEPplus with Apache License 2.0 | 6 votes |
@Override
protected KafkaTableSourceBase createKafkaTableSource(
        TableSchema schema,
        Optional<String> proctimeAttribute,
        List<RowtimeAttributeDescriptor> rowtimeAttributeDescriptors,
        Map<String, String> fieldMapping,
        String topic,
        Properties properties,
        DeserializationSchema<Row> deserializationSchema,
        StartupMode startupMode,
        Map<KafkaTopicPartition, Long> specificStartupOffsets) {
    return new Kafka08TableSource(
        schema,
        proctimeAttribute,
        rowtimeAttributeDescriptors,
        Optional.of(fieldMapping),
        topic,
        properties,
        deserializationSchema,
        startupMode,
        specificStartupOffsets);
}
Example #21
Source File: TestPartitionDiscoverer.java From Flink-CEPplus with Apache License 2.0 | 5 votes |
public static List<List<KafkaTopicPartition>> createMockGetAllPartitionsFromTopicsSequenceFromFixedReturn(
        final List<KafkaTopicPartition> fixed) {
    @SuppressWarnings("unchecked")
    List<List<KafkaTopicPartition>> mockSequence = mock(List.class);
    when(mockSequence.get(anyInt())).thenAnswer(new Answer<List<KafkaTopicPartition>>() {
        @Override
        public List<KafkaTopicPartition> answer(InvocationOnMock invocationOnMock) throws Throwable {
            return new ArrayList<>(fixed);
        }
    });

    return mockSequence;
}
Example #22
Source File: TestPartitionDiscoverer.java From flink with Apache License 2.0 | 5 votes |
public static List<List<KafkaTopicPartition>> createMockGetAllPartitionsFromTopicsSequenceFromFixedReturn(
        final List<KafkaTopicPartition> fixed) {
    @SuppressWarnings("unchecked")
    List<List<KafkaTopicPartition>> mockSequence = mock(List.class);
    when(mockSequence.get(anyInt())).thenAnswer(new Answer<List<KafkaTopicPartition>>() {
        @Override
        public List<KafkaTopicPartition> answer(InvocationOnMock invocationOnMock) throws Throwable {
            return new ArrayList<>(fixed);
        }
    });

    return mockSequence;
}
Example #23
Source File: KafkaTableSourceSinkFactoryTestBase.java From Flink-CEPplus with Apache License 2.0 | 5 votes |
protected abstract KafkaTableSourceBase getExpectedKafkaTableSource(
    TableSchema schema,
    Optional<String> proctimeAttribute,
    List<RowtimeAttributeDescriptor> rowtimeAttributeDescriptors,
    Map<String, String> fieldMapping,
    String topic,
    Properties properties,
    DeserializationSchema<Row> deserializationSchema,
    StartupMode startupMode,
    Map<KafkaTopicPartition, Long> specificStartupOffsets);
Example #24
Source File: FlinkKafkaConsumer010.java From flink with Apache License 2.0 | 5 votes |
@Override
protected Map<KafkaTopicPartition, Long> fetchOffsetsWithTimestamp(
        Collection<KafkaTopicPartition> partitions,
        long timestamp) {

    Map<TopicPartition, Long> partitionOffsetsRequest = new HashMap<>(partitions.size());
    for (KafkaTopicPartition partition : partitions) {
        partitionOffsetsRequest.put(
            new TopicPartition(partition.getTopic(), partition.getPartition()),
            timestamp);
    }

    final Map<KafkaTopicPartition, Long> result = new HashMap<>(partitions.size());
    // use a short-lived consumer to fetch the offsets;
    // this is ok because this is a one-time operation that happens only on startup
    try (KafkaConsumer<?, ?> consumer = new KafkaConsumer(properties)) {
        for (Map.Entry<TopicPartition, OffsetAndTimestamp> partitionToOffset :
                consumer.offsetsForTimes(partitionOffsetsRequest).entrySet()) {

            result.put(
                new KafkaTopicPartition(partitionToOffset.getKey().topic(), partitionToOffset.getKey().partition()),
                (partitionToOffset.getValue() == null) ? null : partitionToOffset.getValue().offset());
        }
    }
    return result;
}
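Example #24 also shows the recurring conversion between Flink's serializable KafkaTopicPartition and the Kafka client's org.apache.kafka.common.TopicPartition. A minimal sketch of that round trip follows; the helper method names are hypothetical and only illustrate the mapping of getTopic()/getPartition() to topic()/partition().

// Flink-side, serializable identifier -> Kafka client identifier.
private static TopicPartition toKafkaClientPartition(KafkaTopicPartition ktp) {
    return new TopicPartition(ktp.getTopic(), ktp.getPartition());
}

// Kafka client identifier -> Flink-side identifier (usable as a checkpointed map key).
private static KafkaTopicPartition toFlinkPartition(TopicPartition tp) {
    return new KafkaTopicPartition(tp.topic(), tp.partition());
}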
Example #25
Source File: FlinkKafkaConsumerBaseTest.java From flink with Apache License 2.0 | 5 votes |
private void checkFilterRestoredPartitionsWithDisovered(
        List<String> restoredKafkaTopics,
        List<String> initKafkaTopics,
        List<String> expectedSubscribedPartitions,
        Boolean disableFiltering) throws Exception {
    final AbstractPartitionDiscoverer discoverer = new TestPartitionDiscoverer(
        new KafkaTopicsDescriptor(initKafkaTopics, null),
        0,
        1,
        TestPartitionDiscoverer.createMockGetAllTopicsSequenceFromFixedReturn(initKafkaTopics),
        TestPartitionDiscoverer.createMockGetAllPartitionsFromTopicsSequenceFromFixedReturn(
            initKafkaTopics.stream()
                .map(topic -> new KafkaTopicPartition(topic, 0))
                .collect(Collectors.toList())));
    final FlinkKafkaConsumerBase<String> consumer = new DummyFlinkKafkaConsumer<>(initKafkaTopics, discoverer);
    if (disableFiltering) {
        consumer.disableFilterRestoredPartitionsWithSubscribedTopics();
    }

    final TestingListState<Tuple2<KafkaTopicPartition, Long>> listState = new TestingListState<>();

    for (int i = 0; i < restoredKafkaTopics.size(); i++) {
        listState.add(new Tuple2<>(new KafkaTopicPartition(restoredKafkaTopics.get(i), 0), 12345L));
    }

    setupConsumer(consumer, true, listState, true, 0, 1);

    Map<KafkaTopicPartition, Long> subscribedPartitionsToStartOffsets = consumer.getSubscribedPartitionsToStartOffsets();

    assertEquals(new HashSet<>(expectedSubscribedPartitions),
        subscribedPartitionsToStartOffsets
            .keySet()
            .stream()
            .map(partition -> partition.getTopic())
            .collect(Collectors.toSet()));
}
Example #26
Source File: FlinkKafkaConsumer.java From flink with Apache License 2.0 | 5 votes |
@Override
protected AbstractFetcher<T, ?> createFetcher(
        SourceContext<T> sourceContext,
        Map<KafkaTopicPartition, Long> assignedPartitionsWithInitialOffsets,
        SerializedValue<WatermarkStrategy<T>> watermarkStrategy,
        StreamingRuntimeContext runtimeContext,
        OffsetCommitMode offsetCommitMode,
        MetricGroup consumerMetricGroup,
        boolean useMetrics) throws Exception {
    // make sure that auto commit is disabled when our offset commit mode is ON_CHECKPOINTS;
    // this overwrites whatever setting the user configured in the properties
    adjustAutoCommitConfig(properties, offsetCommitMode);

    return new KafkaFetcher<>(
        sourceContext,
        assignedPartitionsWithInitialOffsets,
        watermarkStrategy,
        runtimeContext.getProcessingTimeService(),
        runtimeContext.getExecutionConfig().getAutoWatermarkInterval(),
        runtimeContext.getUserCodeClassLoader(),
        runtimeContext.getTaskNameWithSubtasks(),
        deserializer,
        properties,
        pollTimeout,
        runtimeContext.getMetricGroup(),
        consumerMetricGroup,
        useMetrics);
}
Example #27
Source File: KafkaFetcher.java From flink with Apache License 2.0 | 5 votes |
@Override
protected void doCommitInternalOffsetsToKafka(
        Map<KafkaTopicPartition, Long> offsets,
        @Nonnull KafkaCommitCallback commitCallback) throws Exception {

    @SuppressWarnings("unchecked")
    List<KafkaTopicPartitionState<T, TopicPartition>> partitions = subscribedPartitionStates();

    Map<TopicPartition, OffsetAndMetadata> offsetsToCommit = new HashMap<>(partitions.size());

    for (KafkaTopicPartitionState<T, TopicPartition> partition : partitions) {
        Long lastProcessedOffset = offsets.get(partition.getKafkaTopicPartition());
        if (lastProcessedOffset != null) {
            checkState(lastProcessedOffset >= 0, "Illegal offset value to commit");

            // committed offsets through the KafkaConsumer need to be 1 more than the last processed offset.
            // This does not affect Flink's checkpoints/saved state.
            long offsetToCommit = lastProcessedOffset + 1;

            offsetsToCommit.put(partition.getKafkaPartitionHandle(), new OffsetAndMetadata(offsetToCommit));
            partition.setCommittedOffset(offsetToCommit);
        }
    }

    // record the work to be committed by the main consumer thread and make sure the consumer notices that
    consumerThread.setOffsetsToCommit(offsetsToCommit, commitCallback);
}
Example #28
Source File: FlinkKafkaConsumerBaseMigrationTest.java From flink with Apache License 2.0 | 5 votes |
@Override
protected AbstractFetcher<T, ?> createFetcher(
        SourceContext<T> sourceContext,
        Map<KafkaTopicPartition, Long> thisSubtaskPartitionsWithStartOffsets,
        SerializedValue<WatermarkStrategy<T>> watermarkStrategy,
        StreamingRuntimeContext runtimeContext,
        OffsetCommitMode offsetCommitMode,
        MetricGroup consumerMetricGroup,
        boolean useMetrics) throws Exception {
    return fetcher;
}
Example #29
Source File: KafkaBaseConnectorDescriptor.java From alchemy with Apache License 2.0 | 5 votes |
abstract KafkaTableSourceBase newTableSource(
    TableSchema schema,
    Optional<String> proctimeAttribute,
    List<RowtimeAttributeDescriptor> rowtimeAttributeDescriptors,
    Optional<Map<String, String>> fieldMapping,
    String topic,
    Properties properties,
    DeserializationSchema<Row> deserializationSchema,
    StartupMode startupMode,
    Map<KafkaTopicPartition, Long> specificStartupOffsets);
Example #30
Source File: FlinkKafkaConsumerBaseMigrationTest.java From flink with Apache License 2.0 | 5 votes |
@Override
protected AbstractFetcher<T, ?> createFetcher(
        SourceContext<T> sourceContext,
        Map<KafkaTopicPartition, Long> thisSubtaskPartitionsWithStartOffsets,
        SerializedValue<AssignerWithPeriodicWatermarks<T>> watermarksPeriodic,
        SerializedValue<AssignerWithPunctuatedWatermarks<T>> watermarksPunctuated,
        StreamingRuntimeContext runtimeContext,
        OffsetCommitMode offsetCommitMode,
        MetricGroup consumerMetricGroup,
        boolean useMetrics) throws Exception {
    return fetcher;
}