org.apache.flink.streaming.connectors.kafka.internals.KafkaTopicPartitionState Java Examples
The following examples show how to use
org.apache.flink.streaming.connectors.kafka.internals.KafkaTopicPartitionState.
Example #1
Source File: KafkaConsumerThreadTest.java From flink with Apache License 2.0
public TestKafkaConsumerThread(
        KafkaConsumer<byte[], byte[]> mockConsumer,
        ClosableBlockingQueue<KafkaTopicPartitionState<TopicPartition>> unassignedPartitionsQueue,
        Handover handover) {
    super(
        mock(Logger.class),
        handover,
        new Properties(),
        unassignedPartitionsQueue,
        new KafkaConsumerCallBridge09(),
        "test-kafka-consumer-thread",
        0,
        false,
        new UnregisteredMetricsGroup(),
        new UnregisteredMetricsGroup(),
        null);

    this.mockConsumer = mockConsumer;
}
Example #2
Source File: KafkaConsumerThreadTest.java From flink with Apache License 2.0
@Override
void reassignPartitions(List<KafkaTopicPartitionState<Object, TopicPartition>> newPartitions) throws Exception {
    // triggers blocking calls on waitPartitionReassignmentInvoked()
    preReassignmentLatch.trigger();

    // waits for startPartitionReassignment() to be called
    startReassignmentLatch.await();

    try {
        super.reassignPartitions(newPartitions);
    } finally {
        // triggers blocking calls on waitPartitionReassignmentComplete()
        reassignmentCompleteLatch.trigger();

        // waits for endPartitionReassignment() to be called
        postReassignmentLatch.await();
    }
}
Example #3
Source File: KafkaConsumerThreadTest.java From flink with Apache License 2.0
public TestKafkaConsumerThread(
        Consumer<byte[], byte[]> mockConsumer,
        ClosableBlockingQueue<KafkaTopicPartitionState<Object, TopicPartition>> unassignedPartitionsQueue,
        Handover handover) {
    super(
        mock(Logger.class),
        handover,
        new Properties(),
        unassignedPartitionsQueue,
        "test-kafka-consumer-thread",
        0,
        false,
        new UnregisteredMetricsGroup(),
        new UnregisteredMetricsGroup(),
        null);

    this.mockConsumer = mockConsumer;
}
Example #4
Source File: KafkaConsumerThreadTest.java From flink with Apache License 2.0
@Override
void reassignPartitions(List<KafkaTopicPartitionState<TopicPartition>> newPartitions) throws Exception {
    // triggers blocking calls on waitPartitionReassignmentInvoked()
    preReassignmentLatch.trigger();

    // waits for startPartitionReassignment() to be called
    startReassignmentLatch.await();

    try {
        super.reassignPartitions(newPartitions);
    } finally {
        // triggers blocking calls on waitPartitionReassignmentComplete()
        reassignmentCompleteLatch.trigger();

        // waits for endPartitionReassignment() to be called
        postReassignmentLatch.await();
    }
}
Example #5
Source File: Kafka010Fetcher.java From flink with Apache License 2.0
@Override
protected void doCommitInternalOffsetsToKafka(
        Map<KafkaTopicPartition, Long> offsets,
        @Nonnull KafkaCommitCallback commitCallback) throws Exception {

    List<KafkaTopicPartitionState<T, TopicPartition>> partitions = subscribedPartitionStates();

    Map<TopicPartition, OffsetAndMetadata> offsetsToCommit = new HashMap<>(partitions.size());

    for (KafkaTopicPartitionState<T, TopicPartition> partition : partitions) {
        Long lastProcessedOffset = offsets.get(partition.getKafkaTopicPartition());
        if (lastProcessedOffset != null) {
            checkState(lastProcessedOffset >= 0, "Illegal offset value to commit");

            // committed offsets through the KafkaConsumer need to be 1 more than the last processed offset.
            // This does not affect Flink's checkpoints/saved state.
            long offsetToCommit = lastProcessedOffset + 1;

            offsetsToCommit.put(partition.getKafkaPartitionHandle(), new OffsetAndMetadata(offsetToCommit));
            partition.setCommittedOffset(offsetToCommit);
        }
    }

    // record the work to be committed by the main consumer thread and make sure the consumer notices that
    consumerThread.setOffsetsToCommit(offsetsToCommit, commitCallback);
}
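Note: the off-by-one above is the detail worth remembering. Kafka's committed offset is the offset of the next record to read, not the last record processed, which is why the fetcher commits lastProcessedOffset + 1. Below is a minimal, self-contained sketch of the same bookkeeping using only plain kafka-clients types; the class name, topic, and offset values are illustrative, not part of the Flink connector.

import java.util.HashMap;
import java.util.Map;

import org.apache.kafka.clients.consumer.OffsetAndMetadata;
import org.apache.kafka.common.TopicPartition;

public class CommitOffsetSketch {
    public static void main(String[] args) {
        // last offsets fully processed by the pipeline, per partition (illustrative values)
        Map<TopicPartition, Long> lastProcessed = new HashMap<>();
        lastProcessed.put(new TopicPartition("events", 0), 41L);

        Map<TopicPartition, OffsetAndMetadata> toCommit = new HashMap<>(lastProcessed.size());
        for (Map.Entry<TopicPartition, Long> e : lastProcessed.entrySet()) {
            // committed offset = last processed offset + 1 (the next record to read)
            toCommit.put(e.getKey(), new OffsetAndMetadata(e.getValue() + 1));
        }

        System.out.println(toCommit); // events-0 -> offset 42
    }
}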
Example #6
Source File: KafkaFetcher.java From flink with Apache License 2.0
protected void partitionConsumerRecordsHandler(
        List<ConsumerRecord<byte[], byte[]>> partitionRecords,
        KafkaTopicPartitionState<T, TopicPartition> partition) throws Exception {

    for (ConsumerRecord<byte[], byte[]> record : partitionRecords) {
        deserializer.deserialize(record, kafkaCollector);

        // emit the actual records. this also updates offset state atomically and emits
        // watermarks
        emitRecordsWithTimestamps(
            kafkaCollector.getRecords(),
            partition,
            record.offset(),
            record.timestamp());

        if (kafkaCollector.isEndOfStreamSignalled()) {
            // end of stream signaled
            running = false;
            break;
        }
    }
}
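Note: the end-of-stream signal above originates in the deserializer, i.e. in the Kafka data itself, not in the fetcher. A minimal sketch of a schema that ends the stream on a sentinel value, assuming the legacy KafkaDeserializationSchema interface; the "END" marker is an illustrative choice, not a Flink convention.

import java.nio.charset.StandardCharsets;

import org.apache.flink.api.common.typeinfo.TypeInformation;
import org.apache.flink.api.common.typeinfo.Types;
import org.apache.flink.streaming.connectors.kafka.KafkaDeserializationSchema;
import org.apache.kafka.clients.consumer.ConsumerRecord;

public class StringWithEndMarkerSchema implements KafkaDeserializationSchema<String> {

    @Override
    public String deserialize(ConsumerRecord<byte[], byte[]> record) {
        return new String(record.value(), StandardCharsets.UTF_8);
    }

    @Override
    public boolean isEndOfStream(String nextElement) {
        // once this returns true for any partition, the fetcher sets running = false
        return "END".equals(nextElement);
    }

    @Override
    public TypeInformation<String> getProducedType() {
        return Types.STRING;
    }
}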
Example #7
Source File: KafkaConsumerThreadTest.java From Flink-CEPplus with Apache License 2.0
public TestKafkaConsumerThread(
        KafkaConsumer<byte[], byte[]> mockConsumer,
        ClosableBlockingQueue<KafkaTopicPartitionState<TopicPartition>> unassignedPartitionsQueue,
        Handover handover) {
    super(
        mock(Logger.class),
        handover,
        new Properties(),
        unassignedPartitionsQueue,
        new KafkaConsumerCallBridge09(),
        "test-kafka-consumer-thread",
        0,
        false,
        new UnregisteredMetricsGroup(),
        new UnregisteredMetricsGroup(),
        null);

    this.mockConsumer = mockConsumer;
}
Example #8
Source File: KafkaConsumerThreadTest.java From Flink-CEPplus with Apache License 2.0
@Override
void reassignPartitions(List<KafkaTopicPartitionState<TopicPartition>> newPartitions) throws Exception {
    // triggers blocking calls on waitPartitionReassignmentInvoked()
    preReassignmentLatch.trigger();

    // waits for startPartitionReassignment() to be called
    startReassignmentLatch.await();

    try {
        super.reassignPartitions(newPartitions);
    } finally {
        // triggers blocking calls on waitPartitionReassignmentComplete()
        reassignmentCompleteLatch.trigger();

        // waits for endPartitionReassignment() to be called
        postReassignmentLatch.await();
    }
}
Example #9
Source File: KafkaFetcher.java From flink with Apache License 2.0
@Override
public void runFetchLoop() throws Exception {
    try {
        // kick off the actual Kafka consumer
        consumerThread.start();

        while (running) {
            // this blocks until we get the next records
            // it automatically re-throws exceptions encountered in the consumer thread
            final ConsumerRecords<byte[], byte[]> records = handover.pollNext();

            // get the records for each topic partition
            for (KafkaTopicPartitionState<T, TopicPartition> partition : subscribedPartitionStates()) {
                List<ConsumerRecord<byte[], byte[]>> partitionRecords =
                    records.records(partition.getKafkaPartitionHandle());

                partitionConsumerRecordsHandler(partitionRecords, partition);
            }
        }
    } finally {
        // this signals the consumer thread that no more work is to be done
        consumerThread.shutdown();
    }

    // on a clean exit, wait for the runner thread
    try {
        consumerThread.join();
    } catch (InterruptedException e) {
        // may be the result of a wake-up interruption after an exception.
        // we ignore this here and only restore the interruption state
        Thread.currentThread().interrupt();
    }
}
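Note: everything the fetch loop consumes arrives through handover.pollNext(), a single blocking exchange point between the consumer thread and the processing loop, which is also how consumer-thread exceptions are re-thrown into the loop. A rough sketch of that producer/consumer handover pattern using a standard BlockingQueue in place of Flink's Handover class (all names illustrative):

import java.util.concurrent.ArrayBlockingQueue;
import java.util.concurrent.BlockingQueue;

public class HandoverSketch {
    public static void main(String[] args) throws Exception {
        // capacity 1: the producer can stay at most one batch ahead of the consumer
        BlockingQueue<String> handover = new ArrayBlockingQueue<>(1);

        Thread consumerThread = new Thread(() -> {
            try {
                for (int i = 0; i < 3; i++) {
                    handover.put("batch-" + i); // blocks until the loop has taken the previous batch
                }
                handover.put("EOF"); // poison pill that ends the loop below
            } catch (InterruptedException e) {
                Thread.currentThread().interrupt();
            }
        });
        consumerThread.start();

        String batch;
        while (!"EOF".equals(batch = handover.take())) { // blocks, like pollNext()
            System.out.println("processing " + batch);
        }
        consumerThread.join();
    }
}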
Example #10
Source File: KafkaConsumerThreadTest.java From flink with Apache License 2.0
public TestKafkaConsumerThreadRateLimit(
        Logger log,
        Handover handover,
        Properties kafkaProperties,
        ClosableBlockingQueue<KafkaTopicPartitionState<TopicPartition>> unassignedPartitionsQueue,
        KafkaConsumerCallBridge09 consumerCallBridge,
        String threadName,
        long pollTimeout,
        boolean useMetrics,
        MetricGroup consumerMetricGroup,
        MetricGroup subtaskMetricGroup,
        KafkaConsumer mockConsumer,
        FlinkConnectorRateLimiter rateLimiter) {
    super(log, handover, kafkaProperties, unassignedPartitionsQueue, consumerCallBridge,
        threadName, pollTimeout, useMetrics, consumerMetricGroup, subtaskMetricGroup, rateLimiter);
    this.mockConsumer = mockConsumer;
}
Example #11
Source File: KafkaConsumerThread.java From flink with Apache License 2.0
public KafkaConsumerThread(
        Logger log,
        Handover handover,
        Properties kafkaProperties,
        ClosableBlockingQueue<KafkaTopicPartitionState<TopicPartition>> unassignedPartitionsQueue,
        String threadName,
        long pollTimeout,
        boolean useMetrics,
        MetricGroup consumerMetricGroup,
        MetricGroup subtaskMetricGroup) {

    super(threadName);
    setDaemon(true);

    this.log = checkNotNull(log);
    this.handover = checkNotNull(handover);
    this.kafkaProperties = checkNotNull(kafkaProperties);
    this.consumerMetricGroup = checkNotNull(consumerMetricGroup);
    this.subtaskMetricGroup = checkNotNull(subtaskMetricGroup);
    this.unassignedPartitionsQueue = checkNotNull(unassignedPartitionsQueue);

    this.pollTimeout = pollTimeout;
    this.useMetrics = useMetrics;

    this.consumerReassignmentLock = new Object();
    this.nextOffsetsToCommit = new AtomicReference<>();
    this.running = true;
}
Example #12
Source File: KafkaConsumerThread.java From flink with Apache License 2.0
private static List<TopicPartition> convertKafkaPartitions(List<KafkaTopicPartitionState<TopicPartition>> partitions) {
    ArrayList<TopicPartition> result = new ArrayList<>(partitions.size());
    for (KafkaTopicPartitionState<TopicPartition> p : partitions) {
        result.add(p.getKafkaPartitionHandle());
    }
    return result;
}
Example #13
Source File: KafkaFetcher.java From flink with Apache License 2.0
protected void emitRecord(
        T record,
        KafkaTopicPartitionState<TopicPartition> partition,
        long offset,
        ConsumerRecord<?, ?> consumerRecord) throws Exception {
    emitRecordWithTimestamp(record, partition, offset, consumerRecord.timestamp());
}
Example #14
Source File: KafkaFetcher.java From flink with Apache License 2.0
@Override
protected void doCommitInternalOffsetsToKafka(
        Map<KafkaTopicPartition, Long> offsets,
        @Nonnull KafkaCommitCallback commitCallback) throws Exception {

    @SuppressWarnings("unchecked")
    List<KafkaTopicPartitionState<TopicPartition>> partitions = subscribedPartitionStates();

    Map<TopicPartition, OffsetAndMetadata> offsetsToCommit = new HashMap<>(partitions.size());

    for (KafkaTopicPartitionState<TopicPartition> partition : partitions) {
        Long lastProcessedOffset = offsets.get(partition.getKafkaTopicPartition());
        if (lastProcessedOffset != null) {
            checkState(lastProcessedOffset >= 0, "Illegal offset value to commit");

            // committed offsets through the KafkaConsumer need to be 1 more than the last processed offset.
            // This does not affect Flink's checkpoints/saved state.
            long offsetToCommit = lastProcessedOffset + 1;

            offsetsToCommit.put(partition.getKafkaPartitionHandle(), new OffsetAndMetadata(offsetToCommit));
            partition.setCommittedOffset(offsetToCommit);
        }
    }

    // record the work to be committed by the main consumer thread and make sure the consumer notices that
    consumerThread.setOffsetsToCommit(offsetsToCommit, commitCallback);
}
Example #15
Source File: Kafka010Fetcher.java From flink with Apache License 2.0
@Override
protected void emitRecord(
        T record,
        KafkaTopicPartitionState<TopicPartition> partition,
        long offset,
        ConsumerRecord<?, ?> consumerRecord) throws Exception {
    // we attach the Kafka 0.10 timestamp here
    emitRecordWithTimestamp(record, partition, offset, consumerRecord.timestamp());
}
Example #16
Source File: KafkaShuffleFetcher.java From flink with Apache License 2.0
@Override
protected void partitionConsumerRecordsHandler(
        List<ConsumerRecord<byte[], byte[]>> partitionRecords,
        KafkaTopicPartitionState<T, TopicPartition> partition) throws Exception {

    for (ConsumerRecord<byte[], byte[]> record : partitionRecords) {
        final KafkaShuffleElement element = kafkaShuffleDeserializer.deserialize(record);

        // TODO: Do we need to check for the end of the stream if reaching the end watermark?
        // TODO: Currently, if one of the partitions sends an end-of-stream signal, the fetcher stops running.
        // The current end-of-stream logic in KafkaFetcher is a bit strange: if any partition has a record
        // signaled as "END_OF_STREAM", the fetcher stops running. Notice that the signal comes from
        // the deserializer, which means from the Kafka data itself. But it is possible that other topics
        // and partitions still have data to read. Finishing reading Partition0 cannot guarantee that Partition1
        // also finishes.
        if (element.isRecord()) {
            // the timestamp is inherited from upstream
            // If using ProcessingTime, the timestamp is ignored (upstream does not include a timestamp either)
            // If using IngestionTime, the timestamp is overwritten
            // If using EventTime, the timestamp is used
            synchronized (checkpointLock) {
                KafkaShuffleRecord<T> elementAsRecord = element.asRecord();
                sourceContext.collectWithTimestamp(
                    elementAsRecord.value,
                    elementAsRecord.timestamp == null ? record.timestamp() : elementAsRecord.timestamp);
                partition.setOffset(record.offset());
            }
        } else if (element.isWatermark()) {
            final KafkaShuffleWatermark watermark = element.asWatermark();
            Optional<Watermark> newWatermark = watermarkHandler.checkAndGetNewWatermark(watermark);
            newWatermark.ifPresent(sourceContext::emitWatermark);
        }
    }
}
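Note: watermarkHandler.checkAndGetNewWatermark(...) above merges watermarks arriving from multiple upstream producers; its implementation is not shown on this page. A hypothetical minimal version (for illustration only, not Flink's actual class) would track the latest watermark per producer subtask and report a new watermark only when the minimum across all known producers advances:

import java.util.HashMap;
import java.util.Map;
import java.util.Optional;

public class MinWatermarkTracker {

    private final Map<Integer, Long> perProducer = new HashMap<>();
    private long lastEmitted = Long.MIN_VALUE;

    public Optional<Long> checkAndGetNewWatermark(int producerSubtask, long watermark) {
        perProducer.put(producerSubtask, watermark);
        // minimum over all producers seen so far; the map is never empty here
        long min = perProducer.values().stream().mapToLong(Long::longValue).min().getAsLong();
        if (min > lastEmitted) {
            lastEmitted = min;
            return Optional.of(min);
        }
        return Optional.empty();
    }

    public static void main(String[] args) {
        MinWatermarkTracker tracker = new MinWatermarkTracker();
        System.out.println(tracker.checkAndGetNewWatermark(0, 10)); // Optional[10]
        System.out.println(tracker.checkAndGetNewWatermark(1, 5));  // Optional.empty (min fell behind what was emitted)
        System.out.println(tracker.checkAndGetNewWatermark(1, 25)); // Optional.empty (min is 10, already emitted)
        System.out.println(tracker.checkAndGetNewWatermark(0, 30)); // Optional[25]
    }
}

As the first two lines of main show, this simplified sketch can overshoot before it has seen every producer; a production version would also need to know the expected producer count before emitting anything.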
Example #17
Source File: KafkaConsumerThread.java From flink with Apache License 2.0
public KafkaConsumerThread(
        Logger log,
        Handover handover,
        Properties kafkaProperties,
        ClosableBlockingQueue<KafkaTopicPartitionState<T, TopicPartition>> unassignedPartitionsQueue,
        String threadName,
        long pollTimeout,
        boolean useMetrics,
        MetricGroup consumerMetricGroup,
        MetricGroup subtaskMetricGroup) {

    super(threadName);
    setDaemon(true);

    this.log = checkNotNull(log);
    this.handover = checkNotNull(handover);
    this.kafkaProperties = checkNotNull(kafkaProperties);
    this.consumerMetricGroup = checkNotNull(consumerMetricGroup);
    this.subtaskMetricGroup = checkNotNull(subtaskMetricGroup);
    this.unassignedPartitionsQueue = checkNotNull(unassignedPartitionsQueue);

    this.pollTimeout = pollTimeout;
    this.useMetrics = useMetrics;

    this.consumerReassignmentLock = new Object();
    this.nextOffsetsToCommit = new AtomicReference<>();
    this.running = true;
}
Example #18
Source File: KafkaConsumerThread.java From flink with Apache License 2.0
private static <T> List<TopicPartition> convertKafkaPartitions(List<KafkaTopicPartitionState<T, TopicPartition>> partitions) {
    ArrayList<TopicPartition> result = new ArrayList<>(partitions.size());
    for (KafkaTopicPartitionState<T, TopicPartition> p : partitions) {
        result.add(p.getKafkaPartitionHandle());
    }
    return result;
}
Example #19
Source File: KafkaConsumerThread.java From Flink-CEPplus with Apache License 2.0
private static List<TopicPartition> convertKafkaPartitions(List<KafkaTopicPartitionState<TopicPartition>> partitions) {
    ArrayList<TopicPartition> result = new ArrayList<>(partitions.size());
    for (KafkaTopicPartitionState<TopicPartition> p : partitions) {
        result.add(p.getKafkaPartitionHandle());
    }
    return result;
}
Example #20
Source File: KafkaFetcher.java From flink with Apache License 2.0
@Override
protected void doCommitInternalOffsetsToKafka(
        Map<KafkaTopicPartition, Long> offsets,
        @Nonnull KafkaCommitCallback commitCallback) throws Exception {

    @SuppressWarnings("unchecked")
    List<KafkaTopicPartitionState<T, TopicPartition>> partitions = subscribedPartitionStates();

    Map<TopicPartition, OffsetAndMetadata> offsetsToCommit = new HashMap<>(partitions.size());

    for (KafkaTopicPartitionState<T, TopicPartition> partition : partitions) {
        Long lastProcessedOffset = offsets.get(partition.getKafkaTopicPartition());
        if (lastProcessedOffset != null) {
            checkState(lastProcessedOffset >= 0, "Illegal offset value to commit");

            // committed offsets through the KafkaConsumer need to be 1 more than the last processed offset.
            // This does not affect Flink's checkpoints/saved state.
            long offsetToCommit = lastProcessedOffset + 1;

            offsetsToCommit.put(partition.getKafkaPartitionHandle(), new OffsetAndMetadata(offsetToCommit));
            partition.setCommittedOffset(offsetToCommit);
        }
    }

    // record the work to be committed by the main consumer thread and make sure the consumer notices that
    consumerThread.setOffsetsToCommit(offsetsToCommit, commitCallback);
}
Example #21
Source File: KafkaConsumerThread.java From flink with Apache License 2.0
public KafkaConsumerThread(
        Logger log,
        Handover handover,
        Properties kafkaProperties,
        ClosableBlockingQueue<KafkaTopicPartitionState<T, TopicPartition>> unassignedPartitionsQueue,
        String threadName,
        long pollTimeout,
        boolean useMetrics,
        MetricGroup consumerMetricGroup,
        MetricGroup subtaskMetricGroup,
        FlinkConnectorRateLimiter rateLimiter) {

    super(threadName);
    setDaemon(true);

    this.log = checkNotNull(log);
    this.handover = checkNotNull(handover);
    this.kafkaProperties = checkNotNull(kafkaProperties);
    this.consumerMetricGroup = checkNotNull(consumerMetricGroup);
    this.subtaskMetricGroup = checkNotNull(subtaskMetricGroup);
    this.unassignedPartitionsQueue = checkNotNull(unassignedPartitionsQueue);

    this.pollTimeout = pollTimeout;
    this.useMetrics = useMetrics;

    this.consumerReassignmentLock = new Object();
    this.nextOffsetsToCommit = new AtomicReference<>();
    this.running = true;

    if (rateLimiter != null) {
        this.rateLimiter = rateLimiter;
    }
}
Example #22
Source File: KafkaConsumerThread.java From flink with Apache License 2.0
private static <T> List<TopicPartition> convertKafkaPartitions(List<KafkaTopicPartitionState<T, TopicPartition>> partitions) {
    ArrayList<TopicPartition> result = new ArrayList<>(partitions.size());
    for (KafkaTopicPartitionState<T, TopicPartition> p : partitions) {
        result.add(p.getKafkaPartitionHandle());
    }
    return result;
}
Example #23
Source File: KafkaConsumerThreadTest.java From flink with Apache License 2.0
@Test(timeout = 10000)
public void testCloseWithoutAssignedPartitions() throws Exception {
    // no initial assignment
    final Consumer<byte[], byte[]> mockConsumer = createMockConsumer(
        new LinkedHashMap<TopicPartition, Long>(),
        Collections.<TopicPartition, Long>emptyMap(),
        false,
        null,
        null);

    // set up a latch so the test waits until the test thread is blocked in getBatchBlocking()
    final MultiShotLatch getBatchBlockingInvoked = new MultiShotLatch();
    final ClosableBlockingQueue<KafkaTopicPartitionState<Object, TopicPartition>> unassignedPartitionsQueue =
        new ClosableBlockingQueue<KafkaTopicPartitionState<Object, TopicPartition>>() {
            @Override
            public List<KafkaTopicPartitionState<Object, TopicPartition>> getBatchBlocking() throws InterruptedException {
                getBatchBlockingInvoked.trigger();
                return super.getBatchBlocking();
            }
        };

    final TestKafkaConsumerThread testThread =
        new TestKafkaConsumerThread(mockConsumer, unassignedPartitionsQueue, new Handover());
    testThread.start();

    getBatchBlockingInvoked.await();
    testThread.shutdown();
    testThread.join();
}
Example #24
Source File: KafkaConsumerThreadTest.java From flink with Apache License 2.0
public TestKafkaConsumerThreadRateLimit(
        Logger log,
        Handover handover,
        Properties kafkaProperties,
        ClosableBlockingQueue<KafkaTopicPartitionState<Object, TopicPartition>> unassignedPartitionsQueue,
        String threadName,
        long pollTimeout,
        boolean useMetrics,
        MetricGroup consumerMetricGroup,
        MetricGroup subtaskMetricGroup,
        Consumer<byte[], byte[]> mockConsumer,
        FlinkConnectorRateLimiter rateLimiter) {
    super(log, handover, kafkaProperties, unassignedPartitionsQueue, threadName,
        pollTimeout, useMetrics, consumerMetricGroup, subtaskMetricGroup, rateLimiter);
    this.mockConsumer = mockConsumer;
}
Example #25
Source File: KafkaConsumerThread.java From Flink-CEPplus with Apache License 2.0
public KafkaConsumerThread(
        Logger log,
        Handover handover,
        Properties kafkaProperties,
        ClosableBlockingQueue<KafkaTopicPartitionState<TopicPartition>> unassignedPartitionsQueue,
        String threadName,
        long pollTimeout,
        boolean useMetrics,
        MetricGroup consumerMetricGroup,
        MetricGroup subtaskMetricGroup) {

    super(threadName);
    setDaemon(true);

    this.log = checkNotNull(log);
    this.handover = checkNotNull(handover);
    this.kafkaProperties = checkNotNull(kafkaProperties);
    this.consumerMetricGroup = checkNotNull(consumerMetricGroup);
    this.subtaskMetricGroup = checkNotNull(subtaskMetricGroup);
    this.unassignedPartitionsQueue = checkNotNull(unassignedPartitionsQueue);

    this.pollTimeout = pollTimeout;
    this.useMetrics = useMetrics;

    this.consumerReassignmentLock = new Object();
    this.nextOffsetsToCommit = new AtomicReference<>();
    this.running = true;
}
Example #26
Source File: KafkaConsumerThread.java From Flink-CEPplus with Apache License 2.0
public KafkaConsumerThread(
        Logger log,
        Handover handover,
        Properties kafkaProperties,
        ClosableBlockingQueue<KafkaTopicPartitionState<TopicPartition>> unassignedPartitionsQueue,
        KafkaConsumerCallBridge09 consumerCallBridge,
        String threadName,
        long pollTimeout,
        boolean useMetrics,
        MetricGroup consumerMetricGroup,
        MetricGroup subtaskMetricGroup,
        FlinkConnectorRateLimiter rateLimiter) {

    super(threadName);
    setDaemon(true);

    this.log = checkNotNull(log);
    this.handover = checkNotNull(handover);
    this.kafkaProperties = checkNotNull(kafkaProperties);
    this.consumerMetricGroup = checkNotNull(consumerMetricGroup);
    this.subtaskMetricGroup = checkNotNull(subtaskMetricGroup);
    this.consumerCallBridge = checkNotNull(consumerCallBridge);
    this.unassignedPartitionsQueue = checkNotNull(unassignedPartitionsQueue);

    this.pollTimeout = pollTimeout;
    this.useMetrics = useMetrics;

    this.consumerReassignmentLock = new Object();
    this.nextOffsetsToCommit = new AtomicReference<>();
    this.running = true;

    if (rateLimiter != null) {
        this.rateLimiter = rateLimiter;
    }
}
Example #27
Source File: Kafka09Fetcher.java From Flink-CEPplus with Apache License 2.0
protected void emitRecord(
        T record,
        KafkaTopicPartitionState<TopicPartition> partition,
        long offset,
        @SuppressWarnings("UnusedParameters") ConsumerRecord<?, ?> consumerRecord) throws Exception {
    // the 0.9 Fetcher does not try to extract a timestamp
    emitRecord(record, partition, offset);
}
Example #28
Source File: Kafka09Fetcher.java From Flink-CEPplus with Apache License 2.0
@Override
protected void doCommitInternalOffsetsToKafka(
        Map<KafkaTopicPartition, Long> offsets,
        @Nonnull KafkaCommitCallback commitCallback) throws Exception {

    @SuppressWarnings("unchecked")
    List<KafkaTopicPartitionState<TopicPartition>> partitions = subscribedPartitionStates();

    Map<TopicPartition, OffsetAndMetadata> offsetsToCommit = new HashMap<>(partitions.size());

    for (KafkaTopicPartitionState<TopicPartition> partition : partitions) {
        Long lastProcessedOffset = offsets.get(partition.getKafkaTopicPartition());
        if (lastProcessedOffset != null) {
            checkState(lastProcessedOffset >= 0, "Illegal offset value to commit");

            // committed offsets through the KafkaConsumer need to be 1 more than the last processed offset.
            // This does not affect Flink's checkpoints/saved state.
            long offsetToCommit = lastProcessedOffset + 1;

            offsetsToCommit.put(partition.getKafkaPartitionHandle(), new OffsetAndMetadata(offsetToCommit));
            partition.setCommittedOffset(offsetToCommit);
        }
    }

    // record the work to be committed by the main consumer thread and make sure the consumer notices that
    consumerThread.setOffsetsToCommit(offsetsToCommit, commitCallback);
}
Example #29
Source File: KafkaConsumerThreadTest.java From Flink-CEPplus with Apache License 2.0
@Test(timeout = 10000)
public void testCloseWithoutAssignedPartitions() throws Exception {
    // no initial assignment
    final KafkaConsumer<byte[], byte[]> mockConsumer = createMockConsumer(
        new LinkedHashMap<TopicPartition, Long>(),
        Collections.<TopicPartition, Long>emptyMap(),
        false,
        null,
        null);

    // set up a latch so the test waits until the test thread is blocked in getBatchBlocking()
    final MultiShotLatch getBatchBlockingInvoked = new MultiShotLatch();
    final ClosableBlockingQueue<KafkaTopicPartitionState<TopicPartition>> unassignedPartitionsQueue =
        new ClosableBlockingQueue<KafkaTopicPartitionState<TopicPartition>>() {
            @Override
            public List<KafkaTopicPartitionState<TopicPartition>> getBatchBlocking() throws InterruptedException {
                getBatchBlockingInvoked.trigger();
                return super.getBatchBlocking();
            }
        };

    final TestKafkaConsumerThread testThread =
        new TestKafkaConsumerThread(mockConsumer, unassignedPartitionsQueue, new Handover());
    testThread.start();

    getBatchBlockingInvoked.await();
    testThread.shutdown();
    testThread.join();
}
Example #30
Source File: KafkaConsumerThreadTest.java From Flink-CEPplus with Apache License 2.0
public TestKafkaConsumerThreadRateLimit(
        Logger log,
        Handover handover,
        Properties kafkaProperties,
        ClosableBlockingQueue<KafkaTopicPartitionState<TopicPartition>> unassignedPartitionsQueue,
        KafkaConsumerCallBridge09 consumerCallBridge,
        String threadName,
        long pollTimeout,
        boolean useMetrics,
        MetricGroup consumerMetricGroup,
        MetricGroup subtaskMetricGroup,
        KafkaConsumer mockConsumer,
        FlinkConnectorRateLimiter rateLimiter) {
    super(log, handover, kafkaProperties, unassignedPartitionsQueue, consumerCallBridge,
        threadName, pollTimeout, useMetrics, consumerMetricGroup, subtaskMetricGroup, rateLimiter);
    this.mockConsumer = mockConsumer;
}