org.apache.flink.streaming.api.functions.AssignerWithPunctuatedWatermarks Java Examples
The following examples show how to use
org.apache.flink.streaming.api.functions.AssignerWithPunctuatedWatermarks.
You can vote up the ones you like or vote down the ones you don't like,
and go to the original project or source file by following the links above each example. You may also check out the related API usage on the sidebar.
Example #1
Source File: FlinkPulsarSourceTest.java From pulsar-flink with Apache License 2.0 | 6 votes |
public TestingFetcher( SourceFunction.SourceContext<T> sourceContext, Map<String, MessageId> seedTopicsWithInitialOffsets, SerializedValue<AssignerWithPeriodicWatermarks<T>> watermarksPeriodic, SerializedValue<AssignerWithPunctuatedWatermarks<T>> watermarksPunctuated, ProcessingTimeService processingTimeProvider, long autoWatermarkInterval) throws Exception { super( sourceContext, seedTopicsWithInitialOffsets, watermarksPeriodic, watermarksPunctuated, processingTimeProvider, autoWatermarkInterval, TestingFetcher.class.getClassLoader(), null, null, null, 0, null, null); }
Example #2
Source File: AbstractFetcherTest.java From Flink-CEPplus with Apache License 2.0 | 6 votes |
TestFetcher( SourceContext<T> sourceContext, Map<KafkaTopicPartition, Long> assignedPartitionsWithStartOffsets, SerializedValue<AssignerWithPeriodicWatermarks<T>> watermarksPeriodic, SerializedValue<AssignerWithPunctuatedWatermarks<T>> watermarksPunctuated, ProcessingTimeService processingTimeProvider, long autoWatermarkInterval) throws Exception { this( sourceContext, assignedPartitionsWithStartOffsets, watermarksPeriodic, watermarksPunctuated, processingTimeProvider, autoWatermarkInterval, null, null); }
Example #3
Source File: AbstractFetcherTest.java From flink with Apache License 2.0 | 6 votes |
TestFetcher( SourceContext<T> sourceContext, Map<KafkaTopicPartition, Long> assignedPartitionsWithStartOffsets, SerializedValue<AssignerWithPeriodicWatermarks<T>> watermarksPeriodic, SerializedValue<AssignerWithPunctuatedWatermarks<T>> watermarksPunctuated, ProcessingTimeService processingTimeProvider, long autoWatermarkInterval, OneShotLatch fetchLoopWaitLatch, OneShotLatch stateIterationBlockLatch) throws Exception { super( sourceContext, assignedPartitionsWithStartOffsets, watermarksPeriodic, watermarksPunctuated, processingTimeProvider, autoWatermarkInterval, TestFetcher.class.getClassLoader(), new UnregisteredMetricsGroup(), false); this.fetchLoopWaitLatch = fetchLoopWaitLatch; this.stateIterationBlockLatch = stateIterationBlockLatch; }
Example #4
Source File: AbstractFetcherTest.java From flink with Apache License 2.0 | 6 votes |
TestFetcher( SourceContext<T> sourceContext, Map<KafkaTopicPartition, Long> assignedPartitionsWithStartOffsets, SerializedValue<AssignerWithPeriodicWatermarks<T>> watermarksPeriodic, SerializedValue<AssignerWithPunctuatedWatermarks<T>> watermarksPunctuated, ProcessingTimeService processingTimeProvider, long autoWatermarkInterval) throws Exception { this( sourceContext, assignedPartitionsWithStartOffsets, watermarksPeriodic, watermarksPunctuated, processingTimeProvider, autoWatermarkInterval, null, null); }
Example #5
Source File: AbstractFetcherTest.java From Flink-CEPplus with Apache License 2.0 | 6 votes |
TestFetcher( SourceContext<T> sourceContext, Map<KafkaTopicPartition, Long> assignedPartitionsWithStartOffsets, SerializedValue<AssignerWithPeriodicWatermarks<T>> watermarksPeriodic, SerializedValue<AssignerWithPunctuatedWatermarks<T>> watermarksPunctuated, ProcessingTimeService processingTimeProvider, long autoWatermarkInterval, OneShotLatch fetchLoopWaitLatch, OneShotLatch stateIterationBlockLatch) throws Exception { super( sourceContext, assignedPartitionsWithStartOffsets, watermarksPeriodic, watermarksPunctuated, processingTimeProvider, autoWatermarkInterval, TestFetcher.class.getClassLoader(), new UnregisteredMetricsGroup(), false); this.fetchLoopWaitLatch = fetchLoopWaitLatch; this.stateIterationBlockLatch = stateIterationBlockLatch; }
Example #6
Source File: AbstractFetcher.java From flink with Apache License 2.0 | 6 votes |
/** * Shortcut variant of {@link #createPartitionStateHolders(Map, int, SerializedValue, SerializedValue, ClassLoader)} * that uses the same offset for all partitions when creating their state holders. */ private List<KafkaTopicPartitionState<KPH>> createPartitionStateHolders( List<KafkaTopicPartition> partitions, long initialOffset, int timestampWatermarkMode, SerializedValue<AssignerWithPeriodicWatermarks<T>> watermarksPeriodic, SerializedValue<AssignerWithPunctuatedWatermarks<T>> watermarksPunctuated, ClassLoader userCodeClassLoader) throws IOException, ClassNotFoundException { Map<KafkaTopicPartition, Long> partitionsToInitialOffset = new HashMap<>(partitions.size()); for (KafkaTopicPartition partition : partitions) { partitionsToInitialOffset.put(partition, initialOffset); } return createPartitionStateHolders( partitionsToInitialOffset, timestampWatermarkMode, watermarksPeriodic, watermarksPunctuated, userCodeClassLoader); }
Example #7
Source File: FlinkPulsarRowSource.java From pulsar-flink with Apache License 2.0 | 6 votes |
@Override protected PulsarFetcher<Row> createFetcher( SourceContext sourceContext, Map<String, MessageId> seedTopicsWithInitialOffsets, SerializedValue<AssignerWithPeriodicWatermarks<Row>> watermarksPeriodic, SerializedValue<AssignerWithPunctuatedWatermarks<Row>> watermarksPunctuated, ProcessingTimeService processingTimeProvider, long autoWatermarkInterval, ClassLoader userCodeClassLoader, StreamingRuntimeContext streamingRuntime) throws Exception { return new PulsarRowFetcher( sourceContext, seedTopicsWithInitialOffsets, watermarksPeriodic, watermarksPunctuated, processingTimeProvider, autoWatermarkInterval, userCodeClassLoader, streamingRuntime, clientConfigurationData, readerConf, pollTimeoutMs, null, metadataReader); }
Example #8
Source File: PulsarRowFetcher.java From pulsar-flink with Apache License 2.0 | 6 votes |
public PulsarRowFetcher( SourceFunction.SourceContext<Row> sourceContext, Map<String, MessageId> seedTopicsWithInitialOffsets, SerializedValue<AssignerWithPeriodicWatermarks<Row>> watermarksPeriodic, SerializedValue<AssignerWithPunctuatedWatermarks<Row>> watermarksPunctuated, ProcessingTimeService processingTimeProvider, long autoWatermarkInterval, ClassLoader userCodeClassLoader, StreamingRuntimeContext runtimeContext, ClientConfigurationData clientConf, Map<String, Object> readerConf, int pollTimeoutMs, DeserializationSchema<Row> deserializer, PulsarMetadataReader metadataReader) throws Exception { super(sourceContext, seedTopicsWithInitialOffsets, watermarksPeriodic, watermarksPunctuated, processingTimeProvider, autoWatermarkInterval, userCodeClassLoader, runtimeContext, clientConf, readerConf, pollTimeoutMs, deserializer, metadataReader); }
Example #9
Source File: AbstractFetcher.java From Flink-CEPplus with Apache License 2.0 | 6 votes |
/** * Shortcut variant of {@link #createPartitionStateHolders(Map, int, SerializedValue, SerializedValue, ClassLoader)} * that uses the same offset for all partitions when creating their state holders. */ private List<KafkaTopicPartitionState<KPH>> createPartitionStateHolders( List<KafkaTopicPartition> partitions, long initialOffset, int timestampWatermarkMode, SerializedValue<AssignerWithPeriodicWatermarks<T>> watermarksPeriodic, SerializedValue<AssignerWithPunctuatedWatermarks<T>> watermarksPunctuated, ClassLoader userCodeClassLoader) throws IOException, ClassNotFoundException { Map<KafkaTopicPartition, Long> partitionsToInitialOffset = new HashMap<>(partitions.size()); for (KafkaTopicPartition partition : partitions) { partitionsToInitialOffset.put(partition, initialOffset); } return createPartitionStateHolders( partitionsToInitialOffset, timestampWatermarkMode, watermarksPeriodic, watermarksPunctuated, userCodeClassLoader); }
Example #10
Source File: PulsarFetcher.java From pulsar-flink with Apache License 2.0 | 5 votes |
public PulsarFetcher( SourceContext<T> sourceContext, Map<String, MessageId> seedTopicsWithInitialOffsets, SerializedValue<AssignerWithPeriodicWatermarks<T>> watermarksPeriodic, SerializedValue<AssignerWithPunctuatedWatermarks<T>> watermarksPunctuated, ProcessingTimeService processingTimeProvider, long autoWatermarkInterval, ClassLoader userCodeClassLoader, StreamingRuntimeContext runtimeContext, ClientConfigurationData clientConf, Map<String, Object> readerConf, int pollTimeoutMs, DeserializationSchema<T> deserializer, PulsarMetadataReader metadataReader) throws Exception { this( sourceContext, seedTopicsWithInitialOffsets, watermarksPeriodic, watermarksPunctuated, processingTimeProvider, autoWatermarkInterval, userCodeClassLoader, runtimeContext, clientConf, readerConf, pollTimeoutMs, 3, // commit retries before fail deserializer, metadataReader); }
Example #11
Source File: FlinkPulsarSourceTest.java From pulsar-flink with Apache License 2.0 | 5 votes |
@Override protected PulsarFetcher<T> createFetcher( SourceContext sourceContext, Map<String, MessageId> seedTopicsWithInitialOffsets, SerializedValue<AssignerWithPeriodicWatermarks<T>> watermarksPeriodic, SerializedValue<AssignerWithPunctuatedWatermarks<T>> watermarksPunctuated, ProcessingTimeService processingTimeProvider, long autoWatermarkInterval, ClassLoader userCodeClassLoader, StreamingRuntimeContext streamingRuntime) throws Exception { return testFetcherSupplier.get(); }
Example #12
Source File: FlinkPulsarSourceTest.java From pulsar-flink with Apache License 2.0 | 5 votes |
@Override protected PulsarFetcher<T> createFetcher( SourceContext sourceContext, Map<String, MessageId> seedTopicsWithInitialOffsets, SerializedValue<AssignerWithPeriodicWatermarks<T>> watermarksPeriodic, SerializedValue<AssignerWithPunctuatedWatermarks<T>> watermarksPunctuated, ProcessingTimeService processingTimeProvider, long autoWatermarkInterval, ClassLoader userCodeClassLoader, StreamingRuntimeContext streamingRuntime) throws Exception { return new TestingFetcher<>(sourceContext, seedTopicsWithInitialOffsets, watermarksPeriodic, watermarksPunctuated, processingTimeProvider, autoWatermarkInterval); }
Example #13
Source File: FlinkPulsarSource.java From pulsar-flink with Apache License 2.0 | 5 votes |
protected PulsarFetcher<T> createFetcher( SourceContext sourceContext, Map<String, MessageId> seedTopicsWithInitialOffsets, SerializedValue<AssignerWithPeriodicWatermarks<T>> watermarksPeriodic, SerializedValue<AssignerWithPunctuatedWatermarks<T>> watermarksPunctuated, ProcessingTimeService processingTimeProvider, long autoWatermarkInterval, ClassLoader userCodeClassLoader, StreamingRuntimeContext streamingRuntime) throws Exception { readerConf.putIfAbsent(PulsarOptions.SUBSCRIPTION_ROLE_OPTION_KEY, getSubscriptionName()); return new PulsarFetcher( sourceContext, seedTopicsWithInitialOffsets, watermarksPeriodic, watermarksPunctuated, processingTimeProvider, autoWatermarkInterval, userCodeClassLoader, streamingRuntime, clientConfigurationData, readerConf, pollTimeoutMs, commitMaxRetries, deserializer, metadataReader); }
Example #14
Source File: PulsarTopicStateWithPunctuatedWatermarks.java From pulsar-flink with Apache License 2.0 | 5 votes |
public PulsarTopicStateWithPunctuatedWatermarks( String topic, AssignerWithPunctuatedWatermarks<T> timestampsAndWatermarks) { super(topic); this.timestampsAndWatermarks = timestampsAndWatermarks; this.partitionWatermark = Long.MIN_VALUE; }
Example #15
Source File: FlinkKafkaConsumer09.java From flink with Apache License 2.0 | 5 votes |
@Override protected AbstractFetcher<T, ?> createFetcher( SourceContext<T> sourceContext, Map<KafkaTopicPartition, Long> assignedPartitionsWithInitialOffsets, SerializedValue<AssignerWithPeriodicWatermarks<T>> watermarksPeriodic, SerializedValue<AssignerWithPunctuatedWatermarks<T>> watermarksPunctuated, StreamingRuntimeContext runtimeContext, OffsetCommitMode offsetCommitMode, MetricGroup consumerMetricGroup, boolean useMetrics) throws Exception { // make sure that auto commit is disabled when our offset commit mode is ON_CHECKPOINTS; // this overwrites whatever setting the user configured in the properties adjustAutoCommitConfig(properties, offsetCommitMode); // If a rateLimiter is set, then call rateLimiter.open() with the runtime context. if (rateLimiter != null) { rateLimiter.open(runtimeContext); } return new Kafka09Fetcher<>( sourceContext, assignedPartitionsWithInitialOffsets, watermarksPeriodic, watermarksPunctuated, runtimeContext.getProcessingTimeService(), runtimeContext.getExecutionConfig().getAutoWatermarkInterval(), runtimeContext.getUserCodeClassLoader(), runtimeContext.getTaskNameWithSubtasks(), deserializer, properties, pollTimeout, runtimeContext.getMetricGroup(), consumerMetricGroup, useMetrics, rateLimiter); }
Example #16
Source File: Kafka010Fetcher.java From Flink-CEPplus with Apache License 2.0 | 5 votes |
public Kafka010Fetcher( SourceContext<T> sourceContext, Map<KafkaTopicPartition, Long> assignedPartitionsWithInitialOffsets, SerializedValue<AssignerWithPeriodicWatermarks<T>> watermarksPeriodic, SerializedValue<AssignerWithPunctuatedWatermarks<T>> watermarksPunctuated, ProcessingTimeService processingTimeProvider, long autoWatermarkInterval, ClassLoader userCodeClassLoader, String taskNameWithSubtasks, KafkaDeserializationSchema<T> deserializer, Properties kafkaProperties, long pollTimeout, MetricGroup subtaskMetricGroup, MetricGroup consumerMetricGroup, boolean useMetrics, FlinkConnectorRateLimiter rateLimiter) throws Exception { super( sourceContext, assignedPartitionsWithInitialOffsets, watermarksPeriodic, watermarksPunctuated, processingTimeProvider, autoWatermarkInterval, userCodeClassLoader, taskNameWithSubtasks, deserializer, kafkaProperties, pollTimeout, subtaskMetricGroup, consumerMetricGroup, useMetrics, rateLimiter); }
Example #17
Source File: FlinkKafkaConsumerBaseMigrationTest.java From Flink-CEPplus with Apache License 2.0 | 5 votes |
@Override protected AbstractFetcher<T, ?> createFetcher( SourceContext<T> sourceContext, Map<KafkaTopicPartition, Long> thisSubtaskPartitionsWithStartOffsets, SerializedValue<AssignerWithPeriodicWatermarks<T>> watermarksPeriodic, SerializedValue<AssignerWithPunctuatedWatermarks<T>> watermarksPunctuated, StreamingRuntimeContext runtimeContext, OffsetCommitMode offsetCommitMode, MetricGroup consumerMetricGroup, boolean useMetrics) throws Exception { return fetcher; }
Example #18
Source File: AbstractFetcherTest.java From Flink-CEPplus with Apache License 2.0 | 5 votes |
@Test public void testSkipCorruptedRecordWithPunctuatedWatermarks() throws Exception { final String testTopic = "test topic name"; Map<KafkaTopicPartition, Long> originalPartitions = new HashMap<>(); originalPartitions.put(new KafkaTopicPartition(testTopic, 1), KafkaTopicPartitionStateSentinel.LATEST_OFFSET); TestSourceContext<Long> sourceContext = new TestSourceContext<>(); TestProcessingTimeService processingTimeProvider = new TestProcessingTimeService(); TestFetcher<Long> fetcher = new TestFetcher<>( sourceContext, originalPartitions, null, /* periodic watermark assigner */ new SerializedValue<AssignerWithPunctuatedWatermarks<Long>>(new PunctuatedTestExtractor()), /* punctuated watermark assigner */ processingTimeProvider, 0); final KafkaTopicPartitionState<Object> partitionStateHolder = fetcher.subscribedPartitionStates().get(0); // elements generate a watermark if the timestamp is a multiple of three fetcher.emitRecord(1L, partitionStateHolder, 1L); fetcher.emitRecord(2L, partitionStateHolder, 2L); fetcher.emitRecord(3L, partitionStateHolder, 3L); assertEquals(3L, sourceContext.getLatestElement().getValue().longValue()); assertEquals(3L, sourceContext.getLatestElement().getTimestamp()); assertTrue(sourceContext.hasWatermark()); assertEquals(3L, sourceContext.getLatestWatermark().getTimestamp()); assertEquals(3L, partitionStateHolder.getOffset()); // emit null record fetcher.emitRecord(null, partitionStateHolder, 4L); // no elements or watermarks should have been collected assertEquals(3L, sourceContext.getLatestElement().getValue().longValue()); assertEquals(3L, sourceContext.getLatestElement().getTimestamp()); assertFalse(sourceContext.hasWatermark()); // the offset in state still should have advanced assertEquals(4L, partitionStateHolder.getOffset()); }
Example #19
Source File: PulsarFetcherTest.java From pulsar-flink with Apache License 2.0 | 5 votes |
public TestFetcher( SourceFunction.SourceContext<T> sourceContext, Map<String, MessageId> seedTopicsWithInitialOffsets, SerializedValue<AssignerWithPeriodicWatermarks<T>> watermarksPeriodic, SerializedValue<AssignerWithPunctuatedWatermarks<T>> watermarksPunctuated, ProcessingTimeService processingTimeProvider, long autoWatermarkInterval, OneShotLatch fetchLoopWaitLatch, OneShotLatch stateIterationBlockLatch) throws Exception { super( sourceContext, seedTopicsWithInitialOffsets, watermarksPeriodic, watermarksPunctuated, processingTimeProvider, autoWatermarkInterval, TestFetcher.class.getClassLoader(), null, null, null, 0, null, null); this.fetchLoopWaitLatch = fetchLoopWaitLatch; this.stateIterationBlockLatch = stateIterationBlockLatch; }
Example #20
Source File: FlinkKafkaConsumer09.java From Flink-CEPplus with Apache License 2.0 | 5 votes |
@Override protected AbstractFetcher<T, ?> createFetcher( SourceContext<T> sourceContext, Map<KafkaTopicPartition, Long> assignedPartitionsWithInitialOffsets, SerializedValue<AssignerWithPeriodicWatermarks<T>> watermarksPeriodic, SerializedValue<AssignerWithPunctuatedWatermarks<T>> watermarksPunctuated, StreamingRuntimeContext runtimeContext, OffsetCommitMode offsetCommitMode, MetricGroup consumerMetricGroup, boolean useMetrics) throws Exception { // make sure that auto commit is disabled when our offset commit mode is ON_CHECKPOINTS; // this overwrites whatever setting the user configured in the properties adjustAutoCommitConfig(properties, offsetCommitMode); // If a rateLimiter is set, then call rateLimiter.open() with the runtime context. if (rateLimiter != null) { rateLimiter.open(runtimeContext); } return new Kafka09Fetcher<>( sourceContext, assignedPartitionsWithInitialOffsets, watermarksPeriodic, watermarksPunctuated, runtimeContext.getProcessingTimeService(), runtimeContext.getExecutionConfig().getAutoWatermarkInterval(), runtimeContext.getUserCodeClassLoader(), runtimeContext.getTaskNameWithSubtasks(), deserializer, properties, pollTimeout, runtimeContext.getMetricGroup(), consumerMetricGroup, useMetrics, rateLimiter); }
Example #21
Source File: Kafka08Fetcher.java From flink with Apache License 2.0 | 5 votes |
public Kafka08Fetcher( SourceContext<T> sourceContext, Map<KafkaTopicPartition, Long> seedPartitionsWithInitialOffsets, SerializedValue<AssignerWithPeriodicWatermarks<T>> watermarksPeriodic, SerializedValue<AssignerWithPunctuatedWatermarks<T>> watermarksPunctuated, StreamingRuntimeContext runtimeContext, KafkaDeserializationSchema<T> deserializer, Properties kafkaProperties, long autoCommitInterval, MetricGroup consumerMetricGroup, boolean useMetrics) throws Exception { super( sourceContext, seedPartitionsWithInitialOffsets, watermarksPeriodic, watermarksPunctuated, runtimeContext.getProcessingTimeService(), runtimeContext.getExecutionConfig().getAutoWatermarkInterval(), runtimeContext.getUserCodeClassLoader(), consumerMetricGroup, useMetrics); this.deserializer = checkNotNull(deserializer); this.kafkaConfig = checkNotNull(kafkaProperties); this.runtimeContext = runtimeContext; this.invalidOffsetBehavior = getInvalidOffsetBehavior(kafkaProperties); this.autoCommitInterval = autoCommitInterval; }
Example #22
Source File: FlinkKafkaConsumer08.java From flink with Apache License 2.0 | 5 votes |
@Override protected AbstractFetcher<T, ?> createFetcher( SourceContext<T> sourceContext, Map<KafkaTopicPartition, Long> assignedPartitionsWithInitialOffsets, SerializedValue<AssignerWithPeriodicWatermarks<T>> watermarksPeriodic, SerializedValue<AssignerWithPunctuatedWatermarks<T>> watermarksPunctuated, StreamingRuntimeContext runtimeContext, OffsetCommitMode offsetCommitMode, MetricGroup consumerMetricGroup, boolean useMetrics) throws Exception { long autoCommitInterval = (offsetCommitMode == OffsetCommitMode.KAFKA_PERIODIC) ? PropertiesUtil.getLong(kafkaProperties, "auto.commit.interval.ms", 60000) : -1; // this disables the periodic offset committer thread in the fetcher return new Kafka08Fetcher<>( sourceContext, assignedPartitionsWithInitialOffsets, watermarksPeriodic, watermarksPunctuated, runtimeContext, deserializer, kafkaProperties, autoCommitInterval, consumerMetricGroup, useMetrics); }
Example #23
Source File: FlinkKafkaConsumer.java From flink with Apache License 2.0 | 5 votes |
@Override protected AbstractFetcher<T, ?> createFetcher( SourceContext<T> sourceContext, Map<KafkaTopicPartition, Long> assignedPartitionsWithInitialOffsets, SerializedValue<AssignerWithPeriodicWatermarks<T>> watermarksPeriodic, SerializedValue<AssignerWithPunctuatedWatermarks<T>> watermarksPunctuated, StreamingRuntimeContext runtimeContext, OffsetCommitMode offsetCommitMode, MetricGroup consumerMetricGroup, boolean useMetrics) throws Exception { // make sure that auto commit is disabled when our offset commit mode is ON_CHECKPOINTS; // this overwrites whatever setting the user configured in the properties adjustAutoCommitConfig(properties, offsetCommitMode); return new KafkaFetcher<>( sourceContext, assignedPartitionsWithInitialOffsets, watermarksPeriodic, watermarksPunctuated, runtimeContext.getProcessingTimeService(), runtimeContext.getExecutionConfig().getAutoWatermarkInterval(), runtimeContext.getUserCodeClassLoader(), runtimeContext.getTaskNameWithSubtasks(), deserializer, properties, pollTimeout, runtimeContext.getMetricGroup(), consumerMetricGroup, useMetrics); }
Example #24
Source File: KafkaFetcher.java From flink with Apache License 2.0 | 5 votes |
public KafkaFetcher( SourceFunction.SourceContext<T> sourceContext, Map<KafkaTopicPartition, Long> assignedPartitionsWithInitialOffsets, SerializedValue<AssignerWithPeriodicWatermarks<T>> watermarksPeriodic, SerializedValue<AssignerWithPunctuatedWatermarks<T>> watermarksPunctuated, ProcessingTimeService processingTimeProvider, long autoWatermarkInterval, ClassLoader userCodeClassLoader, String taskNameWithSubtasks, KafkaDeserializationSchema<T> deserializer, Properties kafkaProperties, long pollTimeout, MetricGroup subtaskMetricGroup, MetricGroup consumerMetricGroup, boolean useMetrics) throws Exception { super( sourceContext, assignedPartitionsWithInitialOffsets, watermarksPeriodic, watermarksPunctuated, processingTimeProvider, autoWatermarkInterval, userCodeClassLoader, consumerMetricGroup, useMetrics); this.deserializer = deserializer; this.handover = new Handover(); this.consumerThread = new KafkaConsumerThread( LOG, handover, kafkaProperties, unassignedPartitionsQueue, getFetcherName() + " for " + taskNameWithSubtasks, pollTimeout, useMetrics, consumerMetricGroup, subtaskMetricGroup); }
Example #25
Source File: Kafka010Fetcher.java From flink with Apache License 2.0 | 5 votes |
public Kafka010Fetcher( SourceContext<T> sourceContext, Map<KafkaTopicPartition, Long> assignedPartitionsWithInitialOffsets, SerializedValue<AssignerWithPeriodicWatermarks<T>> watermarksPeriodic, SerializedValue<AssignerWithPunctuatedWatermarks<T>> watermarksPunctuated, ProcessingTimeService processingTimeProvider, long autoWatermarkInterval, ClassLoader userCodeClassLoader, String taskNameWithSubtasks, KafkaDeserializationSchema<T> deserializer, Properties kafkaProperties, long pollTimeout, MetricGroup subtaskMetricGroup, MetricGroup consumerMetricGroup, boolean useMetrics, FlinkConnectorRateLimiter rateLimiter) throws Exception { super( sourceContext, assignedPartitionsWithInitialOffsets, watermarksPeriodic, watermarksPunctuated, processingTimeProvider, autoWatermarkInterval, userCodeClassLoader, taskNameWithSubtasks, deserializer, kafkaProperties, pollTimeout, subtaskMetricGroup, consumerMetricGroup, useMetrics, rateLimiter); }
Example #26
Source File: FlinkKafkaConsumer010.java From flink with Apache License 2.0 | 5 votes |
@Override protected AbstractFetcher<T, ?> createFetcher( SourceContext<T> sourceContext, Map<KafkaTopicPartition, Long> assignedPartitionsWithInitialOffsets, SerializedValue<AssignerWithPeriodicWatermarks<T>> watermarksPeriodic, SerializedValue<AssignerWithPunctuatedWatermarks<T>> watermarksPunctuated, StreamingRuntimeContext runtimeContext, OffsetCommitMode offsetCommitMode, MetricGroup consumerMetricGroup, boolean useMetrics) throws Exception { // make sure that auto commit is disabled when our offset commit mode is ON_CHECKPOINTS; // this overwrites whatever setting the user configured in the properties adjustAutoCommitConfig(properties, offsetCommitMode); FlinkConnectorRateLimiter rateLimiter = super.getRateLimiter(); // If a rateLimiter is set, then call rateLimiter.open() with the runtime context. if (rateLimiter != null) { rateLimiter.open(runtimeContext); } return new Kafka010Fetcher<>( sourceContext, assignedPartitionsWithInitialOffsets, watermarksPeriodic, watermarksPunctuated, runtimeContext.getProcessingTimeService(), runtimeContext.getExecutionConfig().getAutoWatermarkInterval(), runtimeContext.getUserCodeClassLoader(), runtimeContext.getTaskNameWithSubtasks(), deserializer, properties, pollTimeout, runtimeContext.getMetricGroup(), consumerMetricGroup, useMetrics, rateLimiter); }
Example #27
Source File: KafkaTopicPartitionStateWithPunctuatedWatermarks.java From flink with Apache License 2.0 | 5 votes |
public KafkaTopicPartitionStateWithPunctuatedWatermarks( KafkaTopicPartition partition, KPH kafkaPartitionHandle, AssignerWithPunctuatedWatermarks<T> timestampsAndWatermarks) { super(partition, kafkaPartitionHandle); this.timestampsAndWatermarks = timestampsAndWatermarks; this.partitionWatermark = Long.MIN_VALUE; }
Example #28
Source File: AbstractFetcherTest.java From flink with Apache License 2.0 | 5 votes |
@Test public void testSkipCorruptedRecordWithPunctuatedWatermarks() throws Exception { final String testTopic = "test topic name"; Map<KafkaTopicPartition, Long> originalPartitions = new HashMap<>(); originalPartitions.put(new KafkaTopicPartition(testTopic, 1), KafkaTopicPartitionStateSentinel.LATEST_OFFSET); TestSourceContext<Long> sourceContext = new TestSourceContext<>(); TestProcessingTimeService processingTimeProvider = new TestProcessingTimeService(); TestFetcher<Long> fetcher = new TestFetcher<>( sourceContext, originalPartitions, null, /* periodic watermark assigner */ new SerializedValue<AssignerWithPunctuatedWatermarks<Long>>(new PunctuatedTestExtractor()), /* punctuated watermark assigner */ processingTimeProvider, 0); final KafkaTopicPartitionState<Object> partitionStateHolder = fetcher.subscribedPartitionStates().get(0); // elements generate a watermark if the timestamp is a multiple of three fetcher.emitRecord(1L, partitionStateHolder, 1L); fetcher.emitRecord(2L, partitionStateHolder, 2L); fetcher.emitRecord(3L, partitionStateHolder, 3L); assertEquals(3L, sourceContext.getLatestElement().getValue().longValue()); assertEquals(3L, sourceContext.getLatestElement().getTimestamp()); assertTrue(sourceContext.hasWatermark()); assertEquals(3L, sourceContext.getLatestWatermark().getTimestamp()); assertEquals(3L, partitionStateHolder.getOffset()); // emit null record fetcher.emitRecord(null, partitionStateHolder, 4L); // no elements or watermarks should have been collected assertEquals(3L, sourceContext.getLatestElement().getValue().longValue()); assertEquals(3L, sourceContext.getLatestElement().getTimestamp()); assertFalse(sourceContext.hasWatermark()); // the offset in state still should have advanced assertEquals(4L, partitionStateHolder.getOffset()); }
Example #29
Source File: FlinkKafkaConsumerBaseMigrationTest.java From flink with Apache License 2.0 | 5 votes |
@Override protected AbstractFetcher<T, ?> createFetcher( SourceContext<T> sourceContext, Map<KafkaTopicPartition, Long> thisSubtaskPartitionsWithStartOffsets, SerializedValue<AssignerWithPeriodicWatermarks<T>> watermarksPeriodic, SerializedValue<AssignerWithPunctuatedWatermarks<T>> watermarksPunctuated, StreamingRuntimeContext runtimeContext, OffsetCommitMode offsetCommitMode, MetricGroup consumerMetricGroup, boolean useMetrics) throws Exception { return fetcher; }
Example #30
Source File: FlinkKafkaConsumerBaseTest.java From flink with Apache License 2.0 | 5 votes |
@Override @SuppressWarnings("unchecked") protected AbstractFetcher<T, ?> createFetcher( SourceContext<T> sourceContext, Map<KafkaTopicPartition, Long> thisSubtaskPartitionsWithStartOffsets, SerializedValue<AssignerWithPeriodicWatermarks<T>> watermarksPeriodic, SerializedValue<AssignerWithPunctuatedWatermarks<T>> watermarksPunctuated, StreamingRuntimeContext runtimeContext, OffsetCommitMode offsetCommitMode, MetricGroup consumerMetricGroup, boolean useMetrics) throws Exception { return testFetcherSupplier.get(); }