org.apache.kafka.clients.consumer.Consumer Java Examples
The following examples show how to use
org.apache.kafka.clients.consumer.Consumer.
Each snippet is taken from an open-source project; the Source File line above each example names the originating file, project, and license.
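Before the project examples, here is a minimal, self-contained sketch of the typical lifecycle: configure, subscribe, poll, close. The broker address, group id, and topic below are placeholders, not taken from any of the projects that follow.

import java.time.Duration;
import java.util.Collections;
import java.util.Properties;
import org.apache.kafka.clients.consumer.Consumer;
import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.common.serialization.StringDeserializer;

public class MinimalConsumerExample {
    public static void main(String[] args) {
        Properties props = new Properties();
        props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092"); // placeholder address
        props.put(ConsumerConfig.GROUP_ID_CONFIG, "example-group");           // placeholder group id
        props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());
        props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());

        // Declaring the interface type (Consumer) rather than KafkaConsumer keeps
        // code testable against mocks, as several examples below demonstrate.
        try (Consumer<String, String> consumer = new KafkaConsumer<>(props)) {
            consumer.subscribe(Collections.singletonList("example-topic")); // placeholder topic
            for (int i = 0; i < 10; i++) { // bounded loop so the sketch terminates
                for (ConsumerRecord<String, String> record : consumer.poll(Duration.ofSeconds(1))) {
                    System.out.printf("offset=%d key=%s value=%s%n",
                            record.offset(), record.key(), record.value());
                }
            }
        }
    }
}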
Example #1
Source File: MessageConsumerFactory.java From alcor with Apache License 2.0
public Consumer Create() {
    Properties props = new Properties();
    props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, this.kafkaAddress);
    props.put(ConsumerConfig.GROUP_ID_CONFIG, IKafkaConfiguration.CONSUMER_GROUP_ID);

    // Key is set as long
    props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, LongDeserializer.class.getName());

    Deserializer deserializer = getDeserializer();
    props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, deserializer.getClass().getName());

    props.put(ConsumerConfig.MAX_POLL_RECORDS_CONFIG, IKafkaConfiguration.MAX_POLL_RECORDS);
    props.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, "false");
    props.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, IKafkaConfiguration.OFFSET_RESET_EARLIER);

    Consumer<Long, String> consumer = new KafkaConsumer<>(props);
    return consumer;
}
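Because this factory disables auto-commit, callers are expected to commit offsets themselves. A sketch only, showing how a consumer from MessageConsumerFactory.Create() might be driven; the topic name and process(...) handler are illustrative placeholders, not alcor APIs:

void runConsumerLoop(Consumer<Long, String> consumer) {
    consumer.subscribe(Collections.singletonList("node-requests")); // placeholder topic
    try {
        while (!Thread.currentThread().isInterrupted()) {
            ConsumerRecords<Long, String> records = consumer.poll(Duration.ofSeconds(1));
            records.forEach(record -> process(record)); // process(...) is a hypothetical handler
            if (!records.isEmpty()) {
                // Manual commit is required: the factory sets ENABLE_AUTO_COMMIT_CONFIG to "false".
                // Committing after processing gives at-least-once delivery on crash.
                consumer.commitSync();
            }
        }
    } finally {
        consumer.close();
    }
}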
Example #2
Source File: KafkaMessageSenderTest.java From synapse with Apache License 2.0
@Test
public void shouldSendCustomMessageHeaders() {
    // given
    final Message<ExampleJsonObject> message = message(
            "someKey",
            Header.of(of("first", "one", "second", "two")),
            new ExampleJsonObject("banana"));

    // given
    try (final Consumer<String, String> consumer = getKafkaConsumer("someTestGroup")) {
        embeddedKafka.consumeFromAnEmbeddedTopic(consumer, KAFKA_TOPIC);

        // when
        messageSender.send(message).join();

        // then
        final ConsumerRecord<String, String> record = getSingleRecord(consumer, KAFKA_TOPIC, 250L);
        assertThat(record.key(), is("someKey"));
        assertThat(record.value(), is("{\"value\":\"banana\"}"));
        assertThat(record.headers().lastHeader("first").value(), is("one".getBytes()));
        assertThat(record.headers().lastHeader("second").value(), is("two".getBytes()));
    }
}
Example #3
Source File: ThreadedConsumerExample.java From kafka-streams-in-action with Apache License 2.0
private Runnable getConsumerThread(Properties properties) {
    return () -> {
        Consumer<String, String> consumer = null;
        try {
            consumer = new KafkaConsumer<>(properties);
            consumer.subscribe(Collections.singletonList("test-topic"));
            while (!doneConsuming) {
                ConsumerRecords<String, String> records = consumer.poll(5000);
                for (ConsumerRecord<String, String> record : records) {
                    String message = String.format("Consumed: key = %s value = %s with offset = %d partition = %d",
                            record.key(), record.value(), record.offset(), record.partition());
                    System.out.println(message);
                }
            }
        } catch (Exception e) {
            e.printStackTrace();
        } finally {
            if (consumer != null) {
                consumer.close();
            }
        }
    };
}
Example #4
Source File: TestUtils.java From uReplicator with Apache License 2.0
private static Consumer<byte[], byte[]> createConsumer(String bootstrapServer) {
    final Properties consumerProps = new Properties();
    consumerProps.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, bootstrapServer);
    consumerProps.put(ConsumerConfig.GROUP_ID_CONFIG, "KafkaExampleConsumer");
    // ByteArrayDeserializer produces primitive byte[] values, so the consumer
    // must be typed Consumer<byte[], byte[]> rather than boxed Byte[].
    consumerProps.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, ByteArrayDeserializer.class.getName());
    consumerProps.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, ByteArrayDeserializer.class.getName());
    consumerProps.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest");

    // Create the consumer using props. Subscription happens in the caller.
    final Consumer<byte[], byte[]> consumer = new KafkaConsumer<>(consumerProps);
    return consumer;
}
Example #5
Source File: TestUtils.java From uReplicator with Apache License 2.0
public static List<ConsumerRecord<byte[], byte[]>> consumeMessage(String bootstrapServer,
        String topicName,
        int timeoutMs) throws InterruptedException {
    long time = new Date().getTime();

    Consumer<byte[], byte[]> consumer = createConsumer(bootstrapServer);
    consumer.subscribe(Collections.singletonList(topicName));

    List<ConsumerRecord<byte[], byte[]>> result = new ArrayList<>();
    while ((new Date().getTime()) - time < timeoutMs) {
        ConsumerRecords<byte[], byte[]> records = consumer.poll(1000);
        Iterator<ConsumerRecord<byte[], byte[]>> iterator = records.iterator();
        while (iterator.hasNext()) {
            result.add(iterator.next());
        }
        Thread.sleep(300);
    }
    consumer.close();
    return result;
}
Example #6
Source File: ConsumerContainer.java From apicurio-registry with Apache License 2.0
public DynamicPool(
        Properties consumerProperties,
        Deserializer<K> keyDeserializer,
        Deserializer<V> valueDeserializer,
        String topic,
        int initialConsumerThreads,
        Oneof2<
            java.util.function.Consumer<? super ConsumerRecord<K, V>>,
            java.util.function.Consumer<? super ConsumerRecords<K, V>>
        > recordOrRecordsHandler,
        BiConsumer<? super Consumer<?, ?>, ? super RuntimeException> consumerExceptionHandler
) {
    this.consumerProperties = Objects.requireNonNull(consumerProperties);
    this.keyDeserializer = Objects.requireNonNull(keyDeserializer);
    this.valueDeserializer = Objects.requireNonNull(valueDeserializer);
    this.topic = Objects.requireNonNull(topic);
    this.recordOrRecordsHandler = Objects.requireNonNull(recordOrRecordsHandler);
    this.consumerExceptionHandler = Objects.requireNonNull(consumerExceptionHandler);
    setConsumerThreads(initialConsumerThreads);
}
Example #7
Source File: OffsetResetterTest.java From garmadon with Apache License 2.0
@Test
public void partitionsAssignedCannotFetchOffset() throws IOException {
    final Consumer<Long, String> consumer = mock(Consumer.class);
    final PartitionedWriter successfulWriter = mock(PartitionedWriter.class);
    final PartitionedWriter exceptionalWriter = mock(PartitionedWriter.class);
    final OffsetResetter offsetResetter = new OffsetResetter<>(consumer,
            mock(java.util.function.Consumer.class),
            Arrays.asList(successfulWriter, exceptionalWriter));
    final TopicPartition partition = new TopicPartition(TOPIC, 1);
    final List<TopicPartition> partitions = Collections.singletonList(partition);

    when(successfulWriter.getStartingOffsets(any())).thenReturn(new HashMap<>());
    when(exceptionalWriter.getStartingOffsets(any())).thenThrow(new IOException("Ayo"));

    offsetResetter.onPartitionsAssigned(partitions);

    verify(consumer, times(1)).seekToBeginning(Collections.singleton(partition));
    verifyNoMoreInteractions(consumer);
}
Example #8
Source File: BaseIT.java From kafka-pubsub-emulator with Apache License 2.0
/**
 * Creates a KafkaConsumer that is manually assigned to all partitions of the test topic indicated
 * by the {@code subscription}.
 */
protected Consumer<String, ByteBuffer> getValidationConsumer(String topic, String subscription) {
    Consumer<String, ByteBuffer> consumer =
        kafkaClientFactory.createConsumer(ProjectSubscriptionName.of(PROJECT, subscription).toString());

    Set<TopicPartition> topicPartitions =
        consumer
            .listTopics()
            .entrySet()
            .stream()
            .filter(e -> e.getKey().equals(ProjectTopicName.of(PROJECT, topic).toString()))
            .flatMap(e -> e.getValue().stream().map(p -> new TopicPartition(p.topic(), p.partition())))
            .collect(Collectors.toSet());

    consumer.assign(topicPartitions);
    return consumer;
}
Example #9
Source File: KafkaStreamsTests.java From synapse with Apache License 2.0
@Test
public void someOtherTest() throws ExecutionException, InterruptedException {
    Map<String, Object> consumerProps = consumerProps("otherTestGroup", "true", this.embeddedKafka);
    consumerProps.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "latest");
    consumerProps.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);
    consumerProps.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);
    ConsumerFactory<String, String> cf = new DefaultKafkaConsumerFactory<>(consumerProps);
    Consumer<String, String> consumer = cf.createConsumer();
    this.embeddedKafka.consumeFromAnEmbeddedTopic(consumer, STREAMING_TOPIC2);

    template.send(STREAMING_TOPIC2, "someOtherTestMessage", "foo").get();

    ConsumerRecord<String, String> replies = getSingleRecord(consumer, STREAMING_TOPIC2, 250L);
    assertThat(replies.key(), is("someOtherTestMessage"));
}
Example #10
Source File: ConsumerLease.java From localization_nifi with Apache License 2.0
ConsumerLease(
        final long maxWaitMillis,
        final Consumer<byte[], byte[]> kafkaConsumer,
        final byte[] demarcatorBytes,
        final String keyEncoding,
        final String securityProtocol,
        final String bootstrapServers,
        final ComponentLog logger) {
    this.maxWaitMillis = maxWaitMillis;
    this.kafkaConsumer = kafkaConsumer;
    this.demarcatorBytes = demarcatorBytes;
    this.keyEncoding = keyEncoding;
    this.securityProtocol = securityProtocol;
    this.bootstrapServers = bootstrapServers;
    this.logger = logger;
}
Example #11
Source File: ConsumerTest.java From kbear with Apache License 2.0
@Test
public void seekToEnd() throws InterruptedException {
    produceMessages();

    try (Consumer<String, String> consumer = createConsumer()) {
        consumer.subscribe(_topics);
        pollDurationTimeout(consumer);

        produceMessages();

        System.out.println("\nseek\n");
        consumer.seekToEnd(_topicPartitions);

        System.out.println("\nsecond-poll\n");
        ConsumerRecords<String, String> consumerRecords = consumer.poll(Duration.ofMillis(30 * 1000));
        Assert.assertEquals(0, consumerRecords.count());
    }
}
Example #12
Source File: KafkaTestUtil.java From rya with Apache License 2.0
/**
 * Polls a {@link Consumer} until it has either polled too many times without hitting the target number
 * of results, or it hits the target number of results.
 *
 * @param pollMs - How long each poll could take.
 * @param pollIterations - The maximum number of polls that will be attempted.
 * @param targetSize - The number of results to read before stopping.
 * @param consumer - The consumer that will be polled.
 * @return The results that were read from the consumer.
 * @throws Exception If the poll failed.
 */
public static <K, V> List<V> pollForResults(
        final int pollMs,
        final int pollIterations,
        final int targetSize,
        final Consumer<K, V> consumer) throws Exception {
    requireNonNull(consumer);

    final List<V> values = new ArrayList<>();

    int i = 0;
    while (values.size() < targetSize && i < pollIterations) {
        for (final ConsumerRecord<K, V> record : consumer.poll(pollMs)) {
            values.add(record.value());
        }
        i++;
    }

    return values;
}
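A hypothetical call site for this helper, assuming a consumer of String values that is already subscribed to the topic under test; the topic name and expected counts are illustrative:

// Poll up to 10 iterations of 3000 ms each, stopping early once 5 values arrive.
consumer.subscribe(Collections.singletonList("results-topic")); // placeholder topic
final List<String> values = KafkaTestUtil.pollForResults(3000, 10, 5, consumer);
Assert.assertEquals(5, values.size());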
Example #13
Source File: KafkaTestInstanceRule.java From rya with Apache License 2.0
/**
 * Delete all of the topics that are in the embedded Kafka instance.
 *
 * @throws InterruptedException Interrupted while waiting for the topics to be deleted.
 */
public void deleteAllTopics() throws InterruptedException {
    // Setup the consumer that is used to list topics for the source.
    final Properties consumerProperties = createBootstrapServerConfig();
    consumerProperties.setProperty(ConsumerConfig.GROUP_ID_CONFIG, UUID.randomUUID().toString());
    consumerProperties.setProperty(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());
    consumerProperties.setProperty(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());

    try (final Consumer<String, String> listTopicsConsumer = new KafkaConsumer<>(consumerProperties)) {
        // Mark all existing topics for deletion.
        Set<String> topics = listTopicsConsumer.listTopics().keySet();
        for (final String topic : topics) {
            deleteTopic(topic);
        }

        // Loop and wait until they are all gone.
        while (!topics.isEmpty()) {
            Thread.sleep(100);
            topics = listTopicsConsumer.listTopics().keySet();
        }
    }
}
Example #14
Source File: ConsumerTest.java From kbear with Apache License 2.0
protected void commitSync(
        java.util.function.BiConsumer<Consumer<String, String>, Map<TopicPartition, OffsetAndMetadata>> committer)
        throws InterruptedException {
    produceMessages();

    try (Consumer<String, String> consumer = createConsumerWithoutAutoCommit()) {
        consumer.subscribe(_topics);
        pollDurationTimeout(consumer);

        OffsetAndMetadata committed = consumer.committed(_topicPartition);
        System.out.println("committed: " + committed);

        OffsetAndMetadata committed2 = new OffsetAndMetadata(committed.offset() + _messageCount, committed.metadata());
        System.out.println("committed2: " + committed2);

        Map<TopicPartition, OffsetAndMetadata> offsetMap = new HashMap<>();
        offsetMap.put(_topicPartition, committed2);
        committer.accept(consumer, offsetMap);

        OffsetAndMetadata committed3 = consumer.committed(_topicPartition);
        System.out.println("committed3: " + committed3);
        Assert.assertEquals(committed2.offset(), committed3.offset());
    }
}
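The BiConsumer parameter lets one assertion body exercise both commit flavors. Plausible call sites might look like the following; kbear's actual tests may differ:

@Test
public void commitSyncWithOffsets() throws InterruptedException {
    commitSync((consumer, offsets) -> consumer.commitSync(offsets)); // synchronous commit path
}

@Test
public void commitAsyncWithOffsets() throws InterruptedException {
    commitSync((consumer, offsets) -> consumer.commitAsync(offsets, null)); // asynchronous path, no callback
}

Note that with the asynchronous variant the immediate committed(...) read could race the in-flight commit, so a real test would presumably wait for the commit to land before asserting.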
Example #15
Source File: TracingConsumerTest.java From brave with Apache License 2.0
@Test
public void should_call_wrapped_poll_and_close_spans() {
    consumer.addRecord(consumerRecord);

    Consumer<String, String> tracingConsumer = kafkaTracing.consumer(consumer);
    tracingConsumer.poll(10);

    // offset changed
    assertThat(consumer.position(topicPartition)).isEqualTo(2L);

    MutableSpan consumerSpan = spans.get(0);
    assertThat(consumerSpan.kind()).isEqualTo(CONSUMER);
    assertThat(consumerSpan.name()).isEqualTo("poll");
    assertThat(consumerSpan.tags())
        .containsOnly(entry("kafka.topic", "myTopic"));
}
Example #16
Source File: RestartTest.java From kbear with Apache License 2.0
@Test
public void restartConsumer() throws InterruptedException {
    _topics = Arrays.asList(TestData.TOPIC, TestData.TOPIC_2, TestData.TOPIC_3, TestData.TOPIC_5);

    Properties producerProperties = new Properties();
    producerProperties.put("key.serializer", "org.apache.kafka.common.serialization.StringSerializer");
    producerProperties.put("value.serializer", "org.apache.kafka.common.serialization.StringSerializer");

    Properties consumerProperties = new Properties();
    consumerProperties.put("group.id", TestData.CONSUMER_GROUP);
    consumerProperties.put("key.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
    consumerProperties.put("value.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
    consumerProperties.put("auto.offset.reset", "earliest");

    try (Producer<String, String> producer = getClientFactory().newProducer(producerProperties);
            Consumer<String, String> consumer = getClientFactory().newConsumer(consumerProperties)) {
        consumer.subscribe(_topics);

        produce(producer);
        consumeMessages(consumer, _messageCount, TestData.TOPIC_5, _messageCount);

        _configurationSource.setPropertyValue(TestData.META_SERVICE_PROPERTY_KEY, TestData.META_SERVICE_URL_2);
        System.out.println("Sleep " + _restartSleep + "ms so as to change route");
        Thread.sleep(_restartSleep);

        produce(producer);
        consumeMessages(consumer, _messageCount, TestData.TOPIC_5, 0);

        _configurationSource.setPropertyValue(TestData.META_SERVICE_PROPERTY_KEY, TestData.META_SERVICE_URL);
        System.out.println("Sleep " + _restartSleep + "ms so as to change route");
        Thread.sleep(_restartSleep);

        consumeMessages(consumer, 0, TestData.TOPIC_5, _messageCount);
    }
}
Example #17
Source File: PregelComputation.java From kafka-graphs with Apache License 2.0
private static boolean isTopicSynced(Consumer<byte[], byte[]> consumer, String topic,
                                     int superstep, Map<TopicPartition, Long> positions,
                                     Function<TopicPartition, Long> lastWrittenOffsets) {
    Set<TopicPartition> partitions = localPartitions(consumer, topic);
    Map<TopicPartition, Long> pos;
    if (positions != null) {
        pos = partitions.stream()
            .collect(Collectors.toMap(Function.identity(), tp -> positions.getOrDefault(tp, 0L)));
    } else {
        pos = positions(consumer, partitions);
    }
    Map<TopicPartition, Long> endOffsets = consumer.endOffsets(partitions);

    // Consumer end offsets may be stale; use last written offset if available
    if (lastWrittenOffsets != null) {
        for (Map.Entry<TopicPartition, Long> endOffset : endOffsets.entrySet()) {
            Long lastWrittenOffset = lastWrittenOffsets.apply(endOffset.getKey());
            if (lastWrittenOffset != null && lastWrittenOffset >= endOffset.getValue()) {
                endOffset.setValue(lastWrittenOffset + 1);
            }
        }
    }

    boolean synced = endOffsets.equals(pos);
    if (synced) {
        log.debug("Step {}, synced Topic {}, end {}", superstep, topic, endOffsets);
    } else {
        log.debug("Step {}, not synced topic {}, pos {}, end {}", superstep, topic, pos, endOffsets);
    }
    return synced;
}
Example #18
Source File: KafkaAdaptorConsumer.java From pulsar-java-tutorial with Apache License 2.0
public static void main(String[] args) {
    String topic = Utils.getTopicName(args);

    Properties props = new Properties();
    props.put("bootstrap.servers", SERVICE_URL);
    props.put("group.id", SUBSCRIPTION_NAME);
    props.put("enable.auto.commit", "false");
    props.put("key.deserializer", IntegerDeserializer.class.getName());
    props.put("value.deserializer", StringDeserializer.class.getName());

    Consumer<Integer, String> consumer = new KafkaConsumer<>(props);
    new ConsumerLoop(consumer, Collections.singleton(topic)).run();
}
Example #19
Source File: ConsumerTest.java From kbear with Apache License 2.0
@Test
public void pausePausedResume() throws InterruptedException {
    produceMessages();

    try (Consumer<String, String> consumer = createConsumer()) {
        consumer.subscribe(_topics);
        pollDurationTimeout(consumer);

        Set<TopicPartition> paused = consumer.paused();
        System.out.println("paused: " + paused);
        Assert.assertTrue(CollectionExtension.isEmpty(paused));

        consumer.pause(_topicPartitions);
        Thread.sleep(30 * 1000);
        paused = consumer.paused();
        System.out.println("paused: " + paused);
        Assert.assertEquals(_topicPartitions, new HashSet<>(paused));

        produceMessages();
        ConsumerRecords<String, String> consumerRecords = consumer.poll(Duration.ofSeconds(30));
        System.out.println("consumerRecords: " + consumerRecords);
        Assert.assertEquals(0, consumerRecords.count());

        consumer.resume(_topicPartitions);
        pollDurationTimeout(consumer);
    }
}
Example #20
Source File: PartitionsPauseStateHandlerTest.java From garmadon with Apache License 2.0
@Test
public void pauseAndResumeWithNoAssignedPartition() {
    Consumer<String, String> consumer = mock(Consumer.class);
    Class clazz = String.class;

    PartitionsPauseStateHandler handler = new PartitionsPauseStateHandler(consumer);

    handler.pause(clazz);
    handler.resume(clazz);

    verifyZeroInteractions(consumer);
}
Example #21
Source File: SubscribableKafkaMessageSource.java From extension-kafka with Apache License 2.0
/**
 * {@inheritDoc}
 * <p/>
 * Any subscribed Event Processor will be placed in the same Consumer Group, defined through the (mandatory)
 * {@link Builder#groupId(String)} method.
 */
@Override
public Registration subscribe(java.util.function.Consumer<List<? extends EventMessage<?>>> eventProcessor) {
    if (this.eventProcessors.add(eventProcessor)) {
        logger.debug("Event Processor [{}] subscribed successfully", eventProcessor);
    } else {
        logger.info("Event Processor [{}] not added. It was already subscribed", eventProcessor);
    }

    if (autoStart) {
        logger.info("Starting event consumption as auto start is enabled");
        start();
    }

    return () -> {
        if (eventProcessors.remove(eventProcessor)) {
            logger.debug("Event Processor [{}] unsubscribed successfully", eventProcessor);
            if (eventProcessors.isEmpty() && autoStart) {
                logger.info("Closing event consumption as auto start is enabled");
                close();
            }
            return true;
        } else {
            logger.info("Event Processor [{}] not removed. It was already unsubscribed", eventProcessor);
            return false;
        }
    };
}
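A hedged usage sketch of this API: subscribe a processor, then later cancel the returned Registration. The wiring method and the println handler are illustrative, not part of extension-kafka:

void wire(SubscribableKafkaMessageSource<String, String> source) {
    // Subscribing may auto-start consumption if the Builder enabled autoStart.
    Registration registration = source.subscribe(
            events -> events.forEach(event -> System.out.println(event.getPayload())));

    // ... on shutdown, invoke the lambda returned by subscribe():
    registration.cancel(); // also closes consumption if this was the last processor and autoStart is enabled
}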
Example #22
Source File: ConsumerTest.java From kbear with Apache License 2.0
@Test
public void pollDurationTimeout() throws InterruptedException {
    produceMessages();

    try (Consumer<String, String> consumer = createConsumer()) {
        consumer.subscribe(_topics);
        pollDurationTimeout(consumer);
    }
}
Example #23
Source File: ConsumerTest.java From kbear with Apache License 2.0
@Test
public void pollLongTimeout() throws InterruptedException {
    produceMessages();

    try (Consumer<String, String> consumer = createConsumer()) {
        consumer.subscribe(_topics);
        pollLongTimeout(consumer);
    }
}
Example #24
Source File: DeleteQueryCommandIT.java From rya with Apache License 2.0
@Before
public void setup() {
    // Make sure the topic that the change log uses exists.
    final String changeLogTopic = KafkaTopics.queryChangeLogTopic(ryaInstance);
    System.out.println("Test Change Log Topic: " + changeLogTopic);
    kafka.createTopic(changeLogTopic);

    // Setup the QueryRepository used by the test.
    final Producer<?, QueryChange> queryProducer =
        KafkaTestUtil.makeProducer(kafka, StringSerializer.class, QueryChangeSerializer.class);
    final Consumer<?, QueryChange> queryConsumer =
        KafkaTestUtil.fromStartConsumer(kafka, StringDeserializer.class, QueryChangeDeserializer.class);
    final QueryChangeLog changeLog = new KafkaQueryChangeLog(queryProducer, queryConsumer, changeLogTopic);
    queryRepo = new InMemoryQueryRepository(changeLog, Scheduler.newFixedRateSchedule(0L, 5, TimeUnit.SECONDS));
}
Example #25
Source File: ConsumerTest.java From kbear with Apache License 2.0
@Test
public void assign() throws InterruptedException {
    produceMessages();

    try (Consumer<String, String> consumer = createConsumer()) {
        consumer.assign(_topicPartitions);
        pollDurationTimeout(consumer);

        Set<TopicPartition> assignments = consumer.assignment();
        System.out.println("assignments: " + assignments);
        Assert.assertEquals(_topicPartitions, assignments);
    }
}
Example #26
Source File: ConsumerTest.java From kbear with Apache License 2.0
@Test
public void assignment() throws InterruptedException {
    produceMessages();

    try (Consumer<String, String> consumer = createConsumer()) {
        consumer.subscribe(_topics);
        pollDurationTimeout(consumer);

        Set<TopicPartition> assignments = consumer.assignment();
        System.out.println("assignments: " + assignments);
        Assert.assertEquals(_topicPartitions, assignments);
    }
}
Example #27
Source File: KafkaMirrorMakerConnector.java From brooklin with BSD 2-Clause "Simplified" License
private Consumer<?, ?> createConsumer(Properties consumerProps, String bootstrapServers, String groupId) {
    Properties properties = new Properties();
    properties.putAll(consumerProps);
    properties.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, bootstrapServers);
    properties.put(ConsumerConfig.GROUP_ID_CONFIG, groupId);
    properties.putIfAbsent(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG,
        ByteArrayDeserializer.class.getCanonicalName());
    properties.putIfAbsent(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG,
        ByteArrayDeserializer.class.getCanonicalName());
    properties.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, CONSUMER_AUTO_OFFSET_RESET_CONFIG_LATEST);
    return _listenerConsumerFactory.createConsumer(properties);
}
Example #28
Source File: TestMapRDBCDCSource.java From datacollector with Apache License 2.0
@Test
public void testMultiplePartitions() throws StageException, InterruptedException {
    MapRDBCDCBeanConfig conf = getConfig();
    conf.topicTableList = Collections.singletonMap("topic", "table");
    conf.numberOfThreads = 1;

    ConsumerRecords<byte[], ChangeDataRecord> consumerRecords1 =
        generateConsumerRecords(5, 1, "topic", 0, ChangeDataRecordType.RECORD_INSERT);
    ConsumerRecords<byte[], ChangeDataRecord> consumerRecords2 =
        generateConsumerRecords(5, 1, "topic", 1, ChangeDataRecordType.RECORD_INSERT);
    ConsumerRecords<byte[], ChangeDataRecord> emptyRecords =
        generateConsumerRecords(0, 1, "topic", 0, ChangeDataRecordType.RECORD_INSERT);

    Consumer mockConsumer = Mockito.mock(Consumer.class);
    List<Consumer> consumerList = Collections.singletonList(mockConsumer);
    Mockito.when(mockConsumer.poll(conf.batchWaitTime))
        .thenReturn(consumerRecords1)
        .thenReturn(consumerRecords2)
        .thenReturn(emptyRecords);

    MapRDBCDCSource source = createSource(conf, consumerList.iterator());
    PushSourceRunner sourceRunner = new PushSourceRunner.Builder(MapRDBCDCDSource.class, source)
        .addOutputLane("lane")
        .build();
    sourceRunner.runInit();

    MultiKafkaPushSourceTestCallback callback = new MultiKafkaPushSourceTestCallback(sourceRunner, 2);

    try {
        sourceRunner.runProduce(new HashMap<>(), 5, callback);
        int records = callback.waitForAllBatches();
        source.await();
        Assert.assertEquals(10, records);
        Assert.assertFalse(source.isRunning());
    } catch (Exception e) {
        Assert.fail(e.getMessage());
        throw e;
    } finally {
        sourceRunner.runDestroy();
    }
}
Example #29
Source File: TracingKafkaClientSupplier.java From java-kafka-client with Apache License 2.0
@Override
public Consumer<byte[], byte[]> getConsumer(Map<String, Object> config) {
    return new TracingKafkaConsumerBuilder<>(
        new KafkaConsumer<>(config, new ByteArrayDeserializer(), new ByteArrayDeserializer()), tracer)
        .withDecorators(spanDecorators)
        .withSpanNameProvider(consumerSpanNameProvider)
        .build();
}
Example #30
Source File: KafkaConsumerMetricsTest.java From micrometer with Apache License 2.0
@Test
void consumerBeforeBindingWhenClosedShouldRemoveMeters() {
    MeterRegistry registry = new SimpleMeterRegistry();

    try (Consumer<Long, String> consumer = createConsumer()) {
        kafkaConsumerMetrics.bindTo(registry);

        Gauge gauge = registry.get("kafka.consumer.assigned.partitions").gauge();
        assertThat(gauge.getId().getTag("client.id")).isEqualTo("consumer-" + consumerCount);
    }

    assertThat(registry.find("kafka.consumer.assigned.partitions").gauge()).isNull();
}