org.apache.flink.streaming.api.functions.AssignerWithPeriodicWatermarks Java Examples
The following examples show how to use
org.apache.flink.streaming.api.functions.AssignerWithPeriodicWatermarks.
The examples are drawn from open-source Flink connector code and streaming demos; the source file and originating project are noted above each example.
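Before the project-specific examples, a minimal sketch of the interface itself may help: an AssignerWithPeriodicWatermarks extracts a timestamp from each element, and the runtime polls it periodically (at the interval set via ExecutionConfig#setAutoWatermarkInterval) for the current watermark. The bounded-out-of-orderness assigner below tracks the highest timestamp seen so far and emits a watermark that lags it by a fixed slack. This is a sketch, not code from any of the projects below; the event type MyEvent and its getTimestamp() accessor are hypothetical placeholders.

import javax.annotation.Nullable;

import org.apache.flink.streaming.api.functions.AssignerWithPeriodicWatermarks;
import org.apache.flink.streaming.api.watermark.Watermark;

/**
 * A bounded-out-of-orderness assigner: events may arrive up to
 * maxOutOfOrderness milliseconds after the highest timestamp seen
 * before they are considered late. MyEvent is a hypothetical event type.
 */
public class BoundedLatenessAssigner implements AssignerWithPeriodicWatermarks<MyEvent> {

    private final long maxOutOfOrderness = 3500; // 3.5 seconds of allowed lateness

    private long currentMaxTimestamp = Long.MIN_VALUE;

    @Override
    public long extractTimestamp(MyEvent element, long previousElementTimestamp) {
        long timestamp = element.getTimestamp();
        currentMaxTimestamp = Math.max(timestamp, currentMaxTimestamp);
        return timestamp;
    }

    @Nullable
    @Override
    public Watermark getCurrentWatermark() {
        // Called periodically by the runtime; the watermark trails the
        // highest timestamp seen by the allowed out-of-orderness.
        return new Watermark(currentMaxTimestamp == Long.MIN_VALUE
                ? Long.MIN_VALUE
                : currentMaxTimestamp - maxOutOfOrderness);
    }
}

Such an assigner would be attached with stream.assignTimestampsAndWatermarks(new BoundedLatenessAssigner()), which is what the flink-learning and blog_demos examples below do with inline anonymous assigners.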
Example #1
Source File: AbstractFetcher.java From Flink-CEPplus with Apache License 2.0
/**
 * Shortcut variant of {@link #createPartitionStateHolders(Map, int, SerializedValue, SerializedValue, ClassLoader)}
 * that uses the same offset for all partitions when creating their state holders.
 */
private List<KafkaTopicPartitionState<KPH>> createPartitionStateHolders(
        List<KafkaTopicPartition> partitions,
        long initialOffset,
        int timestampWatermarkMode,
        SerializedValue<AssignerWithPeriodicWatermarks<T>> watermarksPeriodic,
        SerializedValue<AssignerWithPunctuatedWatermarks<T>> watermarksPunctuated,
        ClassLoader userCodeClassLoader) throws IOException, ClassNotFoundException {

    Map<KafkaTopicPartition, Long> partitionsToInitialOffset = new HashMap<>(partitions.size());
    for (KafkaTopicPartition partition : partitions) {
        partitionsToInitialOffset.put(partition, initialOffset);
    }

    return createPartitionStateHolders(
            partitionsToInitialOffset,
            timestampWatermarkMode,
            watermarksPeriodic,
            watermarksPunctuated,
            userCodeClassLoader);
}
Example #2
Source File: KinesisDataFetcher.java From Flink-CEPplus with Apache License 2.0
/**
 * Creates a Kinesis Data Fetcher.
 *
 * @param streams the streams to subscribe to
 * @param sourceContext context of the source function
 * @param runtimeContext this subtask's runtime context
 * @param configProps the consumer configuration properties
 * @param deserializationSchema deserialization schema
 */
public KinesisDataFetcher(List<String> streams,
        SourceFunction.SourceContext<T> sourceContext,
        RuntimeContext runtimeContext,
        Properties configProps,
        KinesisDeserializationSchema<T> deserializationSchema,
        KinesisShardAssigner shardAssigner,
        AssignerWithPeriodicWatermarks<T> periodicWatermarkAssigner,
        WatermarkTracker watermarkTracker) {
    this(streams,
            sourceContext,
            sourceContext.getCheckpointLock(),
            runtimeContext,
            configProps,
            deserializationSchema,
            shardAssigner,
            periodicWatermarkAssigner,
            watermarkTracker,
            new AtomicReference<>(),
            new ArrayList<>(),
            createInitialSubscribedStreamsToLastDiscoveredShardsState(streams),
            KinesisProxy::create);
}
Example #3
Source File: AbstractFetcherTest.java From Flink-CEPplus with Apache License 2.0
TestFetcher(
        SourceContext<T> sourceContext,
        Map<KafkaTopicPartition, Long> assignedPartitionsWithStartOffsets,
        SerializedValue<AssignerWithPeriodicWatermarks<T>> watermarksPeriodic,
        SerializedValue<AssignerWithPunctuatedWatermarks<T>> watermarksPunctuated,
        ProcessingTimeService processingTimeProvider,
        long autoWatermarkInterval,
        OneShotLatch fetchLoopWaitLatch,
        OneShotLatch stateIterationBlockLatch) throws Exception {
    super(
            sourceContext,
            assignedPartitionsWithStartOffsets,
            watermarksPeriodic,
            watermarksPunctuated,
            processingTimeProvider,
            autoWatermarkInterval,
            TestFetcher.class.getClassLoader(),
            new UnregisteredMetricsGroup(),
            false);
    this.fetchLoopWaitLatch = fetchLoopWaitLatch;
    this.stateIterationBlockLatch = stateIterationBlockLatch;
}
Example #4
Source File: AbstractFetcherTest.java From Flink-CEPplus with Apache License 2.0
TestFetcher(
        SourceContext<T> sourceContext,
        Map<KafkaTopicPartition, Long> assignedPartitionsWithStartOffsets,
        SerializedValue<AssignerWithPeriodicWatermarks<T>> watermarksPeriodic,
        SerializedValue<AssignerWithPunctuatedWatermarks<T>> watermarksPunctuated,
        ProcessingTimeService processingTimeProvider,
        long autoWatermarkInterval) throws Exception {
    this(
            sourceContext,
            assignedPartitionsWithStartOffsets,
            watermarksPeriodic,
            watermarksPunctuated,
            processingTimeProvider,
            autoWatermarkInterval,
            null,
            null);
}
Example #5
Source File: FlinkPulsarSourceTest.java From pulsar-flink with Apache License 2.0
public TestingFetcher(
        SourceFunction.SourceContext<T> sourceContext,
        Map<String, MessageId> seedTopicsWithInitialOffsets,
        SerializedValue<AssignerWithPeriodicWatermarks<T>> watermarksPeriodic,
        SerializedValue<AssignerWithPunctuatedWatermarks<T>> watermarksPunctuated,
        ProcessingTimeService processingTimeProvider,
        long autoWatermarkInterval) throws Exception {
    super(
            sourceContext,
            seedTopicsWithInitialOffsets,
            watermarksPeriodic,
            watermarksPunctuated,
            processingTimeProvider,
            autoWatermarkInterval,
            TestingFetcher.class.getClassLoader(),
            null,
            null,
            null,
            0,
            null,
            null);
}
Example #6
Source File: FlinkPulsarRowSource.java From pulsar-flink with Apache License 2.0
@Override
protected PulsarFetcher<Row> createFetcher(
        SourceContext sourceContext,
        Map<String, MessageId> seedTopicsWithInitialOffsets,
        SerializedValue<AssignerWithPeriodicWatermarks<Row>> watermarksPeriodic,
        SerializedValue<AssignerWithPunctuatedWatermarks<Row>> watermarksPunctuated,
        ProcessingTimeService processingTimeProvider,
        long autoWatermarkInterval,
        ClassLoader userCodeClassLoader,
        StreamingRuntimeContext streamingRuntime) throws Exception {
    return new PulsarRowFetcher(
            sourceContext,
            seedTopicsWithInitialOffsets,
            watermarksPeriodic,
            watermarksPunctuated,
            processingTimeProvider,
            autoWatermarkInterval,
            userCodeClassLoader,
            streamingRuntime,
            clientConfigurationData,
            readerConf,
            pollTimeoutMs,
            null,
            metadataReader);
}
Example #7
Source File: PulsarRowFetcher.java From pulsar-flink with Apache License 2.0
public PulsarRowFetcher(
        SourceFunction.SourceContext<Row> sourceContext,
        Map<String, MessageId> seedTopicsWithInitialOffsets,
        SerializedValue<AssignerWithPeriodicWatermarks<Row>> watermarksPeriodic,
        SerializedValue<AssignerWithPunctuatedWatermarks<Row>> watermarksPunctuated,
        ProcessingTimeService processingTimeProvider,
        long autoWatermarkInterval,
        ClassLoader userCodeClassLoader,
        StreamingRuntimeContext runtimeContext,
        ClientConfigurationData clientConf,
        Map<String, Object> readerConf,
        int pollTimeoutMs,
        DeserializationSchema<Row> deserializer,
        PulsarMetadataReader metadataReader) throws Exception {
    super(sourceContext, seedTopicsWithInitialOffsets, watermarksPeriodic, watermarksPunctuated,
            processingTimeProvider, autoWatermarkInterval, userCodeClassLoader, runtimeContext,
            clientConf, readerConf, pollTimeoutMs, deserializer, metadataReader);
}
Example #8
Source File: KinesisDataFetcher.java From flink with Apache License 2.0
@VisibleForTesting
protected KinesisDataFetcher(List<String> streams,
        SourceFunction.SourceContext<T> sourceContext,
        Object checkpointLock,
        RuntimeContext runtimeContext,
        Properties configProps,
        KinesisDeserializationSchema<T> deserializationSchema,
        KinesisShardAssigner shardAssigner,
        AssignerWithPeriodicWatermarks<T> periodicWatermarkAssigner,
        WatermarkTracker watermarkTracker,
        AtomicReference<Throwable> error,
        List<KinesisStreamShardState> subscribedShardsState,
        HashMap<String, String> subscribedStreamsToLastDiscoveredShardIds,
        FlinkKinesisProxyFactory kinesisProxyFactory) {
    this.streams = checkNotNull(streams);
    this.configProps = checkNotNull(configProps);
    this.sourceContext = checkNotNull(sourceContext);
    this.checkpointLock = checkNotNull(checkpointLock);
    this.runtimeContext = checkNotNull(runtimeContext);
    this.totalNumberOfConsumerSubtasks = runtimeContext.getNumberOfParallelSubtasks();
    this.indexOfThisConsumerSubtask = runtimeContext.getIndexOfThisSubtask();
    this.deserializationSchema = checkNotNull(deserializationSchema);
    this.shardAssigner = checkNotNull(shardAssigner);
    this.periodicWatermarkAssigner = periodicWatermarkAssigner;
    this.watermarkTracker = watermarkTracker;
    this.kinesisProxyFactory = checkNotNull(kinesisProxyFactory);
    this.kinesis = kinesisProxyFactory.create(configProps);

    this.consumerMetricGroup = runtimeContext.getMetricGroup()
            .addGroup(KinesisConsumerMetricConstants.KINESIS_CONSUMER_METRICS_GROUP);

    this.error = checkNotNull(error);
    this.subscribedShardsState = checkNotNull(subscribedShardsState);
    this.subscribedStreamsToLastDiscoveredShardIds = checkNotNull(subscribedStreamsToLastDiscoveredShardIds);

    this.shardConsumersExecutor = createShardConsumersThreadPool(runtimeContext.getTaskNameWithSubtasks());
    this.recordEmitter = createRecordEmitter(configProps);
}
Example #9
Source File: CustomTriggerMain.java From flink-learning with Apache License 2.0
public static void main(String[] args) throws Exception {
    final StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
    // If no time characteristic is specified, the default is ProcessingTime. To use event time
    // instead, the events must carry timestamps or a watermark assigner must be added.
    // env.setStreamTimeCharacteristic(TimeCharacteristic.EventTime);
    ParameterTool parameterTool = ExecutionEnvUtil.PARAMETER_TOOL;

    DataStream<WordEvent> data = env.addSource(new CustomSource())
            .assignTimestampsAndWatermarks(new AssignerWithPeriodicWatermarks<WordEvent>() {
                private long currentTimestamp = Long.MIN_VALUE;
                private final long maxTimeLag = 5000;

                @Nullable
                @Override
                public Watermark getCurrentWatermark() {
                    return new Watermark(currentTimestamp == Long.MIN_VALUE
                            ? Long.MIN_VALUE
                            : currentTimestamp - maxTimeLag);
                }

                @Override
                public long extractTimestamp(WordEvent element, long previousElementTimestamp) {
                    long timestamp = element.getTimestamp();
                    currentTimestamp = Math.max(timestamp, currentTimestamp);
                    return timestamp;
                }
            });

    data.keyBy(WordEvent::getWord)
            .timeWindow(Time.seconds(10))
            .trigger(CustomTrigger.creat())
            .sum("count")
            .print();

    env.execute("zhisheng custom Trigger Window demo");
}
Example #10
Source File: FlinkKafkaConsumerBaseTest.java From flink with Apache License 2.0
@Override
@SuppressWarnings("unchecked")
protected AbstractFetcher<T, ?> createFetcher(
        SourceContext<T> sourceContext,
        Map<KafkaTopicPartition, Long> thisSubtaskPartitionsWithStartOffsets,
        SerializedValue<AssignerWithPeriodicWatermarks<T>> watermarksPeriodic,
        SerializedValue<AssignerWithPunctuatedWatermarks<T>> watermarksPunctuated,
        StreamingRuntimeContext runtimeContext,
        OffsetCommitMode offsetCommitMode,
        MetricGroup consumerMetricGroup,
        boolean useMetrics) throws Exception {
    return testFetcherSupplier.get();
}
Example #11
Source File: Main4.java From flink-learning with Apache License 2.0
public static void main(String[] args) throws Exception {
    final StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
    env.setStreamTimeCharacteristic(TimeCharacteristic.EventTime);
    env.setParallelism(2);

    DataStreamSource<String> data = env.socketTextStream("localhost", 9001);

    data.map(new MapFunction<String, Tuple2<String, Long>>() {
        @Override
        public Tuple2<String, Long> map(String s) throws Exception {
            String[] split = s.split(",");
            return new Tuple2<>(split[0], Long.valueOf(split[1]));
        }
    }).assignTimestampsAndWatermarks(new AssignerWithPeriodicWatermarks<Tuple2<String, Long>>() {
        private long currentTimestamp;

        @Nullable
        @Override
        public Watermark getCurrentWatermark() {
            return new Watermark(currentTimestamp);
        }

        @Override
        public long extractTimestamp(Tuple2<String, Long> tuple2, long l) {
            long timestamp = tuple2.f1;
            currentTimestamp = Math.max(timestamp, currentTimestamp);
            return timestamp;
        }
    }).keyBy(0)
            .window(EventTimeSessionWindows.withGap(Time.minutes(5)))
            .sum(1)
            .print("session ");

    System.out.println(env.getExecutionPlan());
    env.execute();
}
Example #12
Source File: Kafka010Fetcher.java From flink with Apache License 2.0
public Kafka010Fetcher(
        SourceContext<T> sourceContext,
        Map<KafkaTopicPartition, Long> assignedPartitionsWithInitialOffsets,
        SerializedValue<AssignerWithPeriodicWatermarks<T>> watermarksPeriodic,
        SerializedValue<AssignerWithPunctuatedWatermarks<T>> watermarksPunctuated,
        ProcessingTimeService processingTimeProvider,
        long autoWatermarkInterval,
        ClassLoader userCodeClassLoader,
        String taskNameWithSubtasks,
        KafkaDeserializationSchema<T> deserializer,
        Properties kafkaProperties,
        long pollTimeout,
        MetricGroup subtaskMetricGroup,
        MetricGroup consumerMetricGroup,
        boolean useMetrics,
        FlinkConnectorRateLimiter rateLimiter) throws Exception {
    super(
            sourceContext,
            assignedPartitionsWithInitialOffsets,
            watermarksPeriodic,
            watermarksPunctuated,
            processingTimeProvider,
            autoWatermarkInterval,
            userCodeClassLoader,
            taskNameWithSubtasks,
            deserializer,
            kafkaProperties,
            pollTimeout,
            subtaskMetricGroup,
            consumerMetricGroup,
            useMetrics,
            rateLimiter);
}
Example #13
Source File: KeyedProcessFunctionDemo.java From blog_demos with Apache License 2.0
public static void main(String[] args) throws Exception {
    final StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
    env.setStreamTimeCharacteristic(TimeCharacteristic.EventTime);

    // Listen on local port 9999 and read strings
    DataStream<String> socketDataStream = env.socketTextStream("localhost", 9999);

    socketDataStream
            .flatMap(new Splitter())
            // A watermark must be assigned, otherwise the ProcessFunction's onTimer callback will never fire
            .assignTimestampsAndWatermarks(new AssignerWithPeriodicWatermarks<Tuple2<String, Integer>>() {
                @Override
                public long extractTimestamp(Tuple2<String, Integer> element, long previousElementTimestamp) {
                    return System.currentTimeMillis();
                }

                @Override
                public Watermark getCurrentWatermark() {
                    // return the watermark as current time minus the maximum time lag
                    return new Watermark(System.currentTimeMillis());
                }
            })
            .keyBy(0)
            .process(new CountWithTimeoutFunction());
            //.print();

    env.execute("API DataSource demo : socket");
}
Example #14
Source File: FlinkKafkaConsumer.java From flink with Apache License 2.0
@Override
protected AbstractFetcher<T, ?> createFetcher(
        SourceContext<T> sourceContext,
        Map<KafkaTopicPartition, Long> assignedPartitionsWithInitialOffsets,
        SerializedValue<AssignerWithPeriodicWatermarks<T>> watermarksPeriodic,
        SerializedValue<AssignerWithPunctuatedWatermarks<T>> watermarksPunctuated,
        StreamingRuntimeContext runtimeContext,
        OffsetCommitMode offsetCommitMode,
        MetricGroup consumerMetricGroup,
        boolean useMetrics) throws Exception {
    // make sure that auto commit is disabled when our offset commit mode is ON_CHECKPOINTS;
    // this overwrites whatever setting the user configured in the properties
    adjustAutoCommitConfig(properties, offsetCommitMode);

    return new KafkaFetcher<>(
            sourceContext,
            assignedPartitionsWithInitialOffsets,
            watermarksPeriodic,
            watermarksPunctuated,
            runtimeContext.getProcessingTimeService(),
            runtimeContext.getExecutionConfig().getAutoWatermarkInterval(),
            runtimeContext.getUserCodeClassLoader(),
            runtimeContext.getTaskNameWithSubtasks(),
            deserializer,
            properties,
            pollTimeout,
            runtimeContext.getMetricGroup(),
            consumerMetricGroup,
            useMetrics);
}
Example #15
Source File: FlinkKafkaConsumerBaseMigrationTest.java From flink with Apache License 2.0
@Override
protected AbstractFetcher<T, ?> createFetcher(
        SourceContext<T> sourceContext,
        Map<KafkaTopicPartition, Long> thisSubtaskPartitionsWithStartOffsets,
        SerializedValue<AssignerWithPeriodicWatermarks<T>> watermarksPeriodic,
        SerializedValue<AssignerWithPunctuatedWatermarks<T>> watermarksPunctuated,
        StreamingRuntimeContext runtimeContext,
        OffsetCommitMode offsetCommitMode,
        MetricGroup consumerMetricGroup,
        boolean useMetrics) throws Exception {
    return fetcher;
}
Example #16
Source File: Kafka08Fetcher.java From flink with Apache License 2.0
public Kafka08Fetcher(
        SourceContext<T> sourceContext,
        Map<KafkaTopicPartition, Long> seedPartitionsWithInitialOffsets,
        SerializedValue<AssignerWithPeriodicWatermarks<T>> watermarksPeriodic,
        SerializedValue<AssignerWithPunctuatedWatermarks<T>> watermarksPunctuated,
        StreamingRuntimeContext runtimeContext,
        KafkaDeserializationSchema<T> deserializer,
        Properties kafkaProperties,
        long autoCommitInterval,
        MetricGroup consumerMetricGroup,
        boolean useMetrics) throws Exception {
    super(
            sourceContext,
            seedPartitionsWithInitialOffsets,
            watermarksPeriodic,
            watermarksPunctuated,
            runtimeContext.getProcessingTimeService(),
            runtimeContext.getExecutionConfig().getAutoWatermarkInterval(),
            runtimeContext.getUserCodeClassLoader(),
            consumerMetricGroup,
            useMetrics);

    this.deserializer = checkNotNull(deserializer);
    this.kafkaConfig = checkNotNull(kafkaProperties);
    this.runtimeContext = runtimeContext;
    this.invalidOffsetBehavior = getInvalidOffsetBehavior(kafkaProperties);
    this.autoCommitInterval = autoCommitInterval;
}
Example #17
Source File: FlinkKafkaConsumer09.java From flink with Apache License 2.0
@Override
protected AbstractFetcher<T, ?> createFetcher(
        SourceContext<T> sourceContext,
        Map<KafkaTopicPartition, Long> assignedPartitionsWithInitialOffsets,
        SerializedValue<AssignerWithPeriodicWatermarks<T>> watermarksPeriodic,
        SerializedValue<AssignerWithPunctuatedWatermarks<T>> watermarksPunctuated,
        StreamingRuntimeContext runtimeContext,
        OffsetCommitMode offsetCommitMode,
        MetricGroup consumerMetricGroup,
        boolean useMetrics) throws Exception {
    // make sure that auto commit is disabled when our offset commit mode is ON_CHECKPOINTS;
    // this overwrites whatever setting the user configured in the properties
    adjustAutoCommitConfig(properties, offsetCommitMode);

    // If a rateLimiter is set, then call rateLimiter.open() with the runtime context.
    if (rateLimiter != null) {
        rateLimiter.open(runtimeContext);
    }

    return new Kafka09Fetcher<>(
            sourceContext,
            assignedPartitionsWithInitialOffsets,
            watermarksPeriodic,
            watermarksPunctuated,
            runtimeContext.getProcessingTimeService(),
            runtimeContext.getExecutionConfig().getAutoWatermarkInterval(),
            runtimeContext.getUserCodeClassLoader(),
            runtimeContext.getTaskNameWithSubtasks(),
            deserializer,
            properties,
            pollTimeout,
            runtimeContext.getMetricGroup(),
            consumerMetricGroup,
            useMetrics,
            rateLimiter);
}
Example #18
Source File: FlinkPulsarSource.java From pulsar-flink with Apache License 2.0
protected PulsarFetcher<T> createFetcher(
        SourceContext sourceContext,
        Map<String, MessageId> seedTopicsWithInitialOffsets,
        SerializedValue<AssignerWithPeriodicWatermarks<T>> watermarksPeriodic,
        SerializedValue<AssignerWithPunctuatedWatermarks<T>> watermarksPunctuated,
        ProcessingTimeService processingTimeProvider,
        long autoWatermarkInterval,
        ClassLoader userCodeClassLoader,
        StreamingRuntimeContext streamingRuntime) throws Exception {

    readerConf.putIfAbsent(PulsarOptions.SUBSCRIPTION_ROLE_OPTION_KEY, getSubscriptionName());

    return new PulsarFetcher(
            sourceContext,
            seedTopicsWithInitialOffsets,
            watermarksPeriodic,
            watermarksPunctuated,
            processingTimeProvider,
            autoWatermarkInterval,
            userCodeClassLoader,
            streamingRuntime,
            clientConfigurationData,
            readerConf,
            pollTimeoutMs,
            commitMaxRetries,
            deserializer,
            metadataReader);
}
Example #19
Source File: PulsarFetcherTest.java From pulsar-flink with Apache License 2.0
public TestFetcher(
        SourceFunction.SourceContext<T> sourceContext,
        Map<String, MessageId> seedTopicsWithInitialOffsets,
        SerializedValue<AssignerWithPeriodicWatermarks<T>> watermarksPeriodic,
        SerializedValue<AssignerWithPunctuatedWatermarks<T>> watermarksPunctuated,
        ProcessingTimeService processingTimeProvider,
        long autoWatermarkInterval,
        OneShotLatch fetchLoopWaitLatch,
        OneShotLatch stateIterationBlockLatch) throws Exception {
    super(
            sourceContext,
            seedTopicsWithInitialOffsets,
            watermarksPeriodic,
            watermarksPunctuated,
            processingTimeProvider,
            autoWatermarkInterval,
            TestFetcher.class.getClassLoader(),
            null,
            null,
            null,
            0,
            null,
            null);
    this.fetchLoopWaitLatch = fetchLoopWaitLatch;
    this.stateIterationBlockLatch = stateIterationBlockLatch;
}
Example #20
Source File: FlinkPulsarSourceTest.java From pulsar-flink with Apache License 2.0
@Override
protected PulsarFetcher<T> createFetcher(
        SourceContext sourceContext,
        Map<String, MessageId> seedTopicsWithInitialOffsets,
        SerializedValue<AssignerWithPeriodicWatermarks<T>> watermarksPeriodic,
        SerializedValue<AssignerWithPunctuatedWatermarks<T>> watermarksPunctuated,
        ProcessingTimeService processingTimeProvider,
        long autoWatermarkInterval,
        ClassLoader userCodeClassLoader,
        StreamingRuntimeContext streamingRuntime) throws Exception {
    return testFetcherSupplier.get();
}
Example #21
Source File: FlinkPulsarSourceTest.java From pulsar-flink with Apache License 2.0
@Override
protected PulsarFetcher<T> createFetcher(
        SourceContext sourceContext,
        Map<String, MessageId> seedTopicsWithInitialOffsets,
        SerializedValue<AssignerWithPeriodicWatermarks<T>> watermarksPeriodic,
        SerializedValue<AssignerWithPunctuatedWatermarks<T>> watermarksPunctuated,
        ProcessingTimeService processingTimeProvider,
        long autoWatermarkInterval,
        ClassLoader userCodeClassLoader,
        StreamingRuntimeContext streamingRuntime) throws Exception {
    return new TestingFetcher<>(
            sourceContext,
            seedTopicsWithInitialOffsets,
            watermarksPeriodic,
            watermarksPunctuated,
            processingTimeProvider,
            autoWatermarkInterval);
}