org.apache.kafka.clients.consumer.KafkaConsumer Java Examples
The following examples show how to use
org.apache.kafka.clients.consumer.KafkaConsumer.
Each example is taken from an open source project; the originating source file, project, and license are noted above it.
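Before the project examples, here is a minimal, self-contained sketch of the canonical KafkaConsumer poll loop. The broker address (localhost:9092), group id (example-group), topic name (example-topic), and class name are placeholder assumptions; the sketch uses the Duration-based poll(Duration) added in Kafka 2.0, whereas several of the older examples below still call the deprecated poll(long).

import java.time.Duration;
import java.util.Collections;
import java.util.Properties;

import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.common.serialization.StringDeserializer;

public class MinimalConsumer {
    public static void main(String[] args) {
        Properties props = new Properties();
        props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092"); // assumed broker address
        props.put(ConsumerConfig.GROUP_ID_CONFIG, "example-group");           // assumed group id
        props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());
        props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());

        // try-with-resources closes the consumer if the loop exits via an exception
        try (KafkaConsumer<String, String> consumer = new KafkaConsumer<>(props)) {
            consumer.subscribe(Collections.singletonList("example-topic")); // assumed topic name
            while (true) {
                // Duration-based poll replaces the deprecated poll(long)
                ConsumerRecords<String, String> records = consumer.poll(Duration.ofMillis(100));
                for (ConsumerRecord<String, String> record : records) {
                    System.out.printf("offset = %d, key = %s, value = %s%n",
                            record.offset(), record.key(), record.value());
                }
            }
        }
    }
}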
Example #1
Source File: ConsumerExample.java From pulsar with Apache License 2.0
public static void main(String[] args) {
    String topic = "persistent://public/default/test";

    Properties props = new Properties();
    props.put("bootstrap.servers", "pulsar://localhost:6650");
    props.put("group.id", "my-subscription-name");
    props.put("enable.auto.commit", "false");
    props.put("key.deserializer", IntegerDeserializer.class.getName());
    props.put("value.deserializer", StringDeserializer.class.getName());

    @SuppressWarnings("resource")
    Consumer<Integer, String> consumer = new KafkaConsumer<>(props);
    consumer.subscribe(Arrays.asList(topic));

    while (true) {
        ConsumerRecords<Integer, String> records = consumer.poll(100);
        records.forEach(record -> {
            log.info("Received record: {}", record);
        });

        // Commit last offset
        consumer.commitSync();
    }
}
Example #2
Source File: SynapseKafkaAutoConfiguration.java From synapse with Apache License 2.0
@Bean
@ConditionalOnMissingBean(name = "kafkaMessageLogReceiverEndpointFactory")
public MessageLogReceiverEndpointFactory kafkaMessageLogReceiverEndpointFactory(final KafkaProperties kafkaProperties,
                                                                                final MessageInterceptorRegistry interceptorRegistry,
                                                                                final ApplicationEventPublisher eventPublisher,
                                                                                final ConsumerFactory<String, String> kafkaConsumerFactory) {
    LOG.info("Auto-configuring Kafka MessageLogReceiverEndpointFactory");
    final ExecutorService executorService = newCachedThreadPool(
            new ThreadFactoryBuilder().setNameFormat("kafka-message-log-%d").build()
    );
    final KafkaConsumer<String, String> kafkaConsumer = (KafkaConsumer<String, String>) kafkaConsumerFactory.createConsumer();
    return new KafkaMessageLogReceiverEndpointFactory(
            interceptorRegistry,
            kafkaConsumer,
            executorService,
            eventPublisher);
}
Example #3
Source File: Example.java From kafka-serializer-example with MIT License
public static void runConsumer(Properties properties, String topic) throws Exception {
    properties.put("group.id", "test");
    properties.put("enable.auto.commit", "true");
    properties.put("auto.commit.interval.ms", "1000");
    properties.put("session.timeout.ms", "30000");
    properties.put("key.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");

    System.out.printf("Running consumer with serializer %s on topic %s\n",
            properties.getProperty("value.deserializer"), topic);

    KafkaConsumer<String, SensorReading> consumer = new KafkaConsumer<>(properties);
    consumer.subscribe(Arrays.asList(topic));

    while (true) {
        ConsumerRecords<String, SensorReading> records = consumer.poll(100);
        for (ConsumerRecord<String, SensorReading> record : records)
            System.out.printf("offset = %d, key = %s, value = %s\n",
                    record.offset(), record.key(), record.value());
    }
}
Example #4
Source File: ConsumerAOC.java From javatech with Creative Commons Attribution Share Alike 4.0 International
public static void main(String[] args) {
    // 1. Specify the consumer configuration
    final Properties props = new Properties();
    props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, HOST);
    props.put(ConsumerConfig.GROUP_ID_CONFIG, "test");
    props.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, "true");
    props.put(ConsumerConfig.AUTO_COMMIT_INTERVAL_MS_CONFIG, "1000");
    props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.StringDeserializer");
    props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.StringDeserializer");

    // 2. Initialize the Kafka consumer with the configuration
    KafkaConsumer<String, String> consumer = new KafkaConsumer<>(props);

    // 3. Subscribe the consumer to the topic
    consumer.subscribe(Arrays.asList("t1"));

    while (true) {
        // 4. Consume messages
        ConsumerRecords<String, String> records = consumer.poll(100);
        for (ConsumerRecord<String, String> record : records) {
            System.out.printf("offset = %d, key = %s, value = %s%n",
                    record.offset(), record.key(), record.value());
        }
    }
}
Example #5
Source File: KafkaAdminClientTest.java From common-kafka with Apache License 2.0
@Test
public void getConsumerGroupSummary() {
    client.createTopic(testName.getMethodName(), 1, 1);

    Properties properties = new Properties();
    properties.putAll(KafkaTests.getProps());
    properties.setProperty(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, BytesDeserializer.class.getName());
    properties.setProperty(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, BytesDeserializer.class.getName());
    properties.setProperty(ConsumerConfig.GROUP_ID_CONFIG, testName.getMethodName());
    properties.setProperty(ConsumerConfig.CLIENT_ID_CONFIG, testName.getMethodName() + "-client-id");

    try (Consumer<Object, Object> consumer = new KafkaConsumer<>(properties)) {
        consumer.subscribe(Arrays.asList(testName.getMethodName()));
        consumer.poll(Duration.ofSeconds(5L));

        AdminClient.ConsumerGroupSummary summary = client.getConsumerGroupSummary(testName.getMethodName());

        assertThat("Expected only 1 consumer summary when getConsumerGroupSummaries(" + testName.getMethodName() + ")",
                convertToJavaSet(summary.consumers().get().iterator()).size(), is(1));

        assertThat(summary.state(), is(notNullValue()));
        assertThat(summary.coordinator(), is(notNullValue()));
        assertThat(summary.assignmentStrategy(), is(notNullValue()));
    }
}
Example #6
Source File: EarliestNativeTest.java From vertx-kafka-client with Apache License 2.0
public static void main(String[] args) {
    Map<String, String> config = new HashMap<>();
    config.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");
    config.put(ConsumerConfig.GROUP_ID_CONFIG, "my-group");
    config.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, "true");
    config.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest");
    config.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());
    config.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());

    KafkaConsumer consumer = new KafkaConsumer(config);
    consumer.subscribe(Collections.singleton("my-topic"));

    while (true) {
        ConsumerRecords<String, String> records = consumer.poll(Duration.ofMillis(1000));
        for (ConsumerRecord<String, String> record : records) {
            System.out.println(record);
        }
    }
}
Example #7
Source File: ParallelWebKafkaConsumer.java From kafka-webview with MIT License
private List<KafkaResult> consume(final KafkaConsumer kafkaConsumer) {
    final List<KafkaResult> kafkaResultList = new ArrayList<>();
    final ConsumerRecords<?, ?> consumerRecords = kafkaConsumer.poll(pollTimeoutDuration);

    logger.info("Consumed {} records", consumerRecords.count());
    for (final ConsumerRecord consumerRecord : consumerRecords) {
        // Get next record
        // Convert to KafkaResult.
        final KafkaResult kafkaResult = new KafkaResult(
            consumerRecord.partition(),
            consumerRecord.offset(),
            consumerRecord.timestamp(),
            consumerRecord.key(),
            consumerRecord.value()
        );

        // Add to list.
        kafkaResultList.add(kafkaResult);
    }

    // Commit offsets
    commit(kafkaConsumer);
    return kafkaResultList;
}
Example #8
Source File: MaasAppenderEvent.java From DBus with Apache License 2.0
public MaasAppenderEvent(String topic, String dataTopic) {
    super(01);
    this.topic = topic;
    this.dataTopic = dataTopic;
    dao = new DbusDataDaoImpl();

    Properties props = HeartBeatConfigContainer.getInstance().getKafkaConsumerConfig();
    Properties producerProps = HeartBeatConfigContainer.getInstance().getmaasConf().getProducerProp();
    try {
        LoggerFactory.getLogger().info("[topic] ...." + topic);
        LoggerFactory.getLogger().info("[maas-appender-event] initial.........................");
        dataConsumer = new KafkaConsumer<>(props);
        partition0 = new TopicPartition(this.topic, 0);
        dataConsumer.assign(Arrays.asList(partition0));
        dataConsumer.seekToEnd(Arrays.asList(partition0));

        statProducer = new KafkaProducer<>(producerProps);
    } catch (Exception e) {
        e.printStackTrace();
    }
}
Example #9
Source File: KafkaEasyTransMsgConsumerImpl.java From EasyTransaction with Apache License 2.0
public KafkaEasyTransMsgConsumerImpl(ConsumerConfig cfg, ObjectSerializer serializer,
        KafkaEasyTransMsgPublisherImpl retryQueueMsgProducer) {
    this.serializer = serializer;
    this.cfg = cfg;
    consumer = new KafkaConsumer<>(cfg.getNativeCfg());
    reconsumer = new KafkaConsumer<>(cfg.getNativeCfg());
    this.retryQueueMsgProducer = retryQueueMsgProducer;
    threadPool = Executors.newFixedThreadPool(cfg.getConsumerThread(), new NamedThreadFactory("KafkaMsgHandler"));

    // Compute the retry-delay level threshold for each retry count
    List<List<Integer>> reconsumeCfg = cfg.getReconsume();
    initRetryThreshold(reconsumeCfg);
    initRetryRecordsMap();
    initRetryQueueSubscribe(reconsumeCfg);
    initRetryQueuePartitionCountMap();
}
Example #10
Source File: Launcher.java From SkyEye with GNU General Public License v3.0
public static void main(String[] args) {
    SpringApplicationBuilder builder = new SpringApplicationBuilder(Launcher.class);

    Set<ApplicationListener<?>> listeners = builder.application().getListeners();
    for (Iterator<ApplicationListener<?>> it = listeners.iterator(); it.hasNext();) {
        ApplicationListener<?> listener = it.next();
        if (listener instanceof LoggingApplicationListener) {
            it.remove();
        }
    }
    builder.application().setListeners(listeners);

    ConfigurableApplicationContext context = builder.run(args);
    LOGGER.info("collector metrics start successfully");

    KafkaConsumer<byte[], String> kafkaConsumer = (KafkaConsumer<byte[], String>) context.getBean("kafkaConsumer");
    Task task = (Task) context.getBean("metricsTask");

    // Shut down the application gracefully
    Runtime.getRuntime().addShutdownHook(new ShutdownHookRunner(kafkaConsumer, task));
    task.doTask();
}
Example #11
Source File: KafkaTopicBroadcaster.java From cqrs-eventsourcing-kafka with Apache License 2.0
public KafkaTopicBroadcaster(String name, ObjectMapper objectMapper, String zookeeper) {
    super();
    Properties props = new Properties();
    try {
        props.put("client.id", InetAddress.getLocalHost().getHostName());
    } catch (UnknownHostException e) {
        // preserve the cause instead of throwing a bare RuntimeException
        throw new RuntimeException(e);
    }
    props.put("bootstrap.servers", zookeeper);
    props.put("group.id", name);
    props.put("key.deserializer", StringDeserializer.class);
    props.put("value.deserializer", StringDeserializer.class);
    props.put("enable.auto.commit", "false");
    props.put("auto.offset.reset", "earliest");

    this.consumer = new KafkaConsumer(props);
    this.objectMapper = objectMapper;
}
Example #12
Source File: CheckBeginingOffset.java From BigData-In-Practice with Apache License 2.0
public static void main(String[] args) {
    KafkaConsumer<String, String> kafkaConsumer = createNewConsumer();
    List<PartitionInfo> partitions = kafkaConsumer.partitionsFor("topic-monitor");
    List<TopicPartition> tpList = partitions.stream()
            .map(pInfo -> new TopicPartition(pInfo.topic(), pInfo.partition()))
            .collect(toList());
    Map<TopicPartition, Long> beginningOffsets = kafkaConsumer.beginningOffsets(tpList);
    System.out.println(beginningOffsets);
}
Example #13
Source File: KafkaTestUtils.java From kafka-junit with BSD 3-Clause "New" or "Revised" License
/**
 * Return Kafka Consumer configured to consume from internal Kafka Server.
 * @param <K> Type of message key
 * @param <V> Type of message value
 * @param keyDeserializer Class of deserializer to be used for keys.
 * @param valueDeserializer Class of deserializer to be used for values.
 * @param config Additional consumer configuration options to be set.
 * @return KafkaConsumer configured to consume from the test server.
 */
public <K, V> KafkaConsumer<K, V> getKafkaConsumer(
    final Class<? extends Deserializer<K>> keyDeserializer,
    final Class<? extends Deserializer<V>> valueDeserializer,
    final Properties config
) {
    // Build config
    final Map<String, Object> kafkaConsumerConfig = buildDefaultClientConfig();
    kafkaConsumerConfig.put("key.deserializer", keyDeserializer);
    kafkaConsumerConfig.put("value.deserializer", valueDeserializer);
    kafkaConsumerConfig.put("partition.assignment.strategy", "org.apache.kafka.clients.consumer.RoundRobinAssignor");

    // Override config
    if (config != null) {
        for (final Map.Entry<Object, Object> entry : config.entrySet()) {
            kafkaConsumerConfig.put(entry.getKey().toString(), entry.getValue());
        }
    }

    // Create and return Consumer.
    return new KafkaConsumer<>(kafkaConsumerConfig);
}
Example #14
Source File: KafkaConsumerFromOffset.java From post-kafka-rewind-consumer-offset with MIT License
public static void main(String[] args) {
    KafkaConsumer<String, String> consumer = createConsumer();
    consumer.subscribe(Arrays.asList(TOPIC));

    boolean flag = true;

    while (true) {
        ConsumerRecords<String, String> records = consumer.poll(100);

        if (flag) {
            Set<TopicPartition> assignments = consumer.assignment();
            assignments.forEach(topicPartition ->
                    consumer.seek(topicPartition, 90));
            flag = false;
        }

        for (ConsumerRecord<String, String> record : records)
            System.out.printf("offset = %d, key = %s, value = %s%n",
                    record.offset(), record.key(), record.value());
    }
}
Example #15
Source File: AtlasNotificationServerEmulator.java From nifi with Apache License 2.0
public void consume(Consumer<HookNotification> c) {
    Properties props = new Properties();
    props.put("bootstrap.servers", "localhost:9092");
    props.put("group.id", "test");
    props.put("enable.auto.commit", "true");
    props.put("auto.commit.interval.ms", "1000");
    props.put("session.timeout.ms", "30000");
    props.put("key.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
    props.put("value.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");

    KafkaConsumer<String, String> consumer = new KafkaConsumer<>(props);
    consumer.subscribe(Arrays.asList("ATLAS_HOOK"));

    isStopped = false;
    while (!isStopped) {
        ConsumerRecords<String, String> records = consumer.poll(100);
        for (ConsumerRecord<String, String> record : records) {
            final MessageDeserializer deserializer = NotificationInterface.NotificationType.HOOK.getDeserializer();
            final HookNotification m = (HookNotification) deserializer.deserialize(record.value());
            c.accept(m);
        }
    }

    consumer.close();
}
Example #16
Source File: OffsetCommitSyncBatch.java From BigData-In-Practice with Apache License 2.0
public static void main(String[] args) {
    KafkaConsumer<String, String> consumer = new ConsumerFactory<String, String>().create();
    final int minBatchSize = 200;
    List<ConsumerRecord> buffer = new ArrayList<>();

    while (true) {
        ConsumerRecords<String, String> records = consumer.poll(Duration.ofMillis(1000));
        for (ConsumerRecord<String, String> record : records) {
            buffer.add(record);
            System.out.println(record.offset() + " : " + record.value());
        }
        if (buffer.size() >= minBatchSize) {
            // do some logical processing with buffer.
            consumer.commitSync();
            buffer.clear();
        }
    }
}
Example #17
Source File: KafkaConsumeOrderWorkaround.java From flowing-retail with Apache License 2.0
@PostConstruct
public void startConsuming() {
    consumerThread = new Thread("kafka-workaround-consumer") {
        public void run() {
            final Properties props = new Properties();
            props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, bootstrapServers);
            props.put(ConsumerConfig.GROUP_ID_CONFIG, consumerGroup);
            props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());
            props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());

            consumer = new KafkaConsumer<>(props);
            consumer.subscribe(Collections.singletonList(topicName));

            while (running) {
                consumer.poll(pollingInterval);
                consumer.commitAsync();
            }
            consumer.close();
        }
    };
    consumerThread.start();
}
Example #18
Source File: KafkaClient.java From kylin-on-parquet-v2 with Apache License 2.0
public static Map<Integer, Long> getLatestOffsets(final CubeInstance cubeInstance) {
    final KafkaConfig kafkaConfig = KafkaConfigManager.getInstance(KylinConfig.getInstanceFromEnv())
            .getKafkaConfig(cubeInstance.getRootFactTable());

    final String brokers = KafkaClient.getKafkaBrokers(kafkaConfig);
    final String topic = kafkaConfig.getTopic();

    Map<Integer, Long> startOffsets = Maps.newHashMap();
    try (final KafkaConsumer consumer = KafkaClient.getKafkaConsumer(brokers, cubeInstance.getName())) {
        final List<PartitionInfo> partitionInfos = consumer.partitionsFor(topic);
        for (PartitionInfo partitionInfo : partitionInfos) {
            long latest = getLatestOffset(consumer, topic, partitionInfo.partition());
            startOffsets.put(partitionInfo.partition(), latest);
        }
    }
    return startOffsets;
}
Example #19
Source File: KafkaCache.java From kcache with Apache License 2.0
private Consumer<byte[], byte[]> createConsumer() {
    Properties consumerProps = new Properties();
    addKafkaCacheConfigsToClientProperties(consumerProps);
    consumerProps.put(ConsumerConfig.GROUP_ID_CONFIG, this.groupId);
    consumerProps.put(ConsumerConfig.CLIENT_ID_CONFIG, clientId);
    consumerProps.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, bootstrapBrokers);
    consumerProps.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest");
    consumerProps.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, "false");
    consumerProps.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, ByteArrayDeserializer.class);
    consumerProps.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, ByteArrayDeserializer.class);
    return new KafkaConsumer<>(consumerProps);
}
Example #20
Source File: ITKafkaStreamsTracing.java From brave with Apache License 2.0
Consumer<String, String> createTracingConsumer(String... topics) {
    if (topics.length == 0) {
        topics = new String[] {testName.getMethodName()};
    }
    KafkaConsumer<String, String> consumer = kafka.helper().createStringConsumer();
    List<TopicPartition> assignments = new ArrayList<>();
    for (String topic : topics) {
        assignments.add(new TopicPartition(topic, 0));
    }
    consumer.assign(assignments);
    return KafkaTracing.create(tracing).consumer(consumer);
}
Example #21
Source File: ParallelWebKafkaConsumer.java From kafka-webview with MIT License
private List<PartitionOffset> getTailOffsets(final KafkaConsumer<?, ?> kafkaConsumer) {
    final Map<TopicPartition, Long> results = kafkaConsumer.endOffsets(getAllPartitions(kafkaConsumer));

    final List<PartitionOffset> offsets = new ArrayList<>();
    for (final Map.Entry<TopicPartition, Long> entry : results.entrySet()) {
        offsets.add(new PartitionOffset(entry.getKey().partition(), entry.getValue()));
    }
    return offsets;
}
Example #22
Source File: KafkaEventChannel.java From jstarcraft-core with Apache License 2.0
@Override
public void registerMonitor(Set<Class> types, EventMonitor monitor) {
    try {
        for (Class type : types) {
            EventManager manager = managers.get(type);
            String group = name + StringUtility.DOT + type.getName();
            if (manager == null) {
                manager = new EventManager();
                managers.put(type, manager);
                Properties properties = new Properties();
                properties.put("bootstrap.servers", connections);
                properties.put("key.deserializer", keyDeserializer);
                properties.put("value.deserializer", valueDeserializer);
                switch (mode) {
                case QUEUE: {
                    properties.put("group.id", group);
                    properties.put("auto.offset.reset", "earliest");
                    break;
                }
                case TOPIC: {
                    properties.put("group.id", group + UUID.randomUUID());
                    properties.put("auto.offset.reset", "latest");
                    break;
                }
                }
                KafkaConsumer<String, byte[]> consumer = new KafkaConsumer<>(properties);
                consumer.subscribe(Collections.singleton(group));
                // TODO This prevents losing messages produced before the first poll when auto.offset.reset is latest.
                updateAssignmentMetadata.invoke(consumer, Time.SYSTEM.timer(Long.MAX_VALUE));
                consumers.put(type, consumer);
                EventThread thread = new EventThread(type, manager);
                thread.start();
                threads.put(type, thread);
            }
            manager.attachMonitor(monitor);
        }
    } catch (Exception exception) {
        throw new RuntimeException(exception);
    }
}
Example #23
Source File: DemoConsumerAssign.java From KafkaExample with Apache License 2.0
public static void main(String[] args) {
    // NOTE: the hardcoded args below override the command line, so the usage check that follows always passes
    args = new String[] { "kafka0:9092", "topic1", "group1", "consumer3" };
    if (args == null || args.length != 4) {
        System.err.println(
                "Usage:\n\tjava -jar kafka_consumer.jar ${bootstrap_server} ${topic_name} ${group_name} ${client_id}");
        System.exit(1);
    }
    String bootstrap = args[0];
    String topic = args[1];
    String groupid = args[2];
    String clientid = args[3];

    Properties props = new Properties();
    props.put("bootstrap.servers", bootstrap);
    props.put("group.id", groupid);
    props.put("client.id", clientid);
    props.put("enable.auto.commit", "true");
    props.put("auto.commit.interval.ms", "1000");
    props.put("key.deserializer", StringDeserializer.class.getName());
    props.put("value.deserializer", StringDeserializer.class.getName());
    props.put("auto.offset.reset", "earliest");

    KafkaConsumer<String, String> consumer = new KafkaConsumer<>(props);
    consumer.assign(Arrays.asList(new TopicPartition(topic, 0), new TopicPartition(topic, 1)));

    while (true) {
        ConsumerRecords<String, String> records = consumer.poll(100);
        records.forEach(record -> {
            System.out.printf("client : %s , topic: %s , partition: %d , offset = %d, key = %s, value = %s%n",
                    clientid, record.topic(), record.partition(), record.offset(), record.key(), record.value());
        });
    }
}
Example #24
Source File: CodecEndpoint.java From quarkus with Apache License 2.0
public static KafkaConsumer<String, Pet> createPetConsumer() {
    Properties props = new Properties();
    props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:19092");
    props.put(ConsumerConfig.GROUP_ID_CONFIG, "pet");
    props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());
    props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, PetCodec.class.getName());
    props.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, "true");
    props.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest");
    KafkaConsumer<String, Pet> consumer = new KafkaConsumer<>(props);
    consumer.subscribe(Collections.singletonList("pets"));
    return consumer;
}
Example #25
Source File: KafkaBinderTests.java From spring-cloud-stream-binder-kafka with Apache License 2.0
private KafkaConsumer getKafkaConsumer(Binding binding) {
    DirectFieldAccessor bindingAccessor = new DirectFieldAccessor(binding);
    KafkaMessageDrivenChannelAdapter adapter = (KafkaMessageDrivenChannelAdapter) bindingAccessor
            .getPropertyValue("lifecycle");
    DirectFieldAccessor adapterAccessor = new DirectFieldAccessor(adapter);
    ConcurrentMessageListenerContainer messageListenerContainer = (ConcurrentMessageListenerContainer) adapterAccessor
            .getPropertyValue("messageListenerContainer");
    DirectFieldAccessor containerAccessor = new DirectFieldAccessor(messageListenerContainer);
    DefaultKafkaConsumerFactory consumerFactory = (DefaultKafkaConsumerFactory) containerAccessor
            .getPropertyValue("consumerFactory");
    return (KafkaConsumer) consumerFactory.createConsumer();
}
Example #26
Source File: KafkaConsumerMetricsTest.java From micrometer with Apache License 2.0
private Consumer<Long, String> createConsumer() {
    Properties props = new Properties();
    props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, BOOTSTRAP_SERVERS);
    props.put(ConsumerConfig.GROUP_ID_CONFIG, "MicrometerTestConsumer");
    props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, LongDeserializer.class.getName());
    props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());
    Consumer<Long, String> consumer = new KafkaConsumer<>(props);
    consumer.subscribe(Collections.singletonList(TOPIC));
    consumerCount++;
    return consumer;
}
Example #27
Source File: KafkaConsumerCommand.java From azeroth with Apache License 2.0
public void close() {
    adminClient.close();
    if (kafkaConsumers != null) {
        for (KafkaConsumer<String, Serializable> kafkaConsumer : kafkaConsumers.values()) {
            kafkaConsumer.close();
        }
    }
}
Example #28
Source File: KafkaConsumerThreadTest.java From Flink-CEPplus with Apache License 2.0
@Test(timeout = 10000)
public void testCloseWithoutAssignedPartitions() throws Exception {
    // no initial assignment
    final KafkaConsumer<byte[], byte[]> mockConsumer = createMockConsumer(
        new LinkedHashMap<TopicPartition, Long>(),
        Collections.<TopicPartition, Long>emptyMap(),
        false,
        null,
        null);

    // setup latch so the test waits until testThread is blocked on getBatchBlocking method
    final MultiShotLatch getBatchBlockingInvoked = new MultiShotLatch();
    final ClosableBlockingQueue<KafkaTopicPartitionState<TopicPartition>> unassignedPartitionsQueue =
        new ClosableBlockingQueue<KafkaTopicPartitionState<TopicPartition>>() {
            @Override
            public List<KafkaTopicPartitionState<TopicPartition>> getBatchBlocking() throws InterruptedException {
                getBatchBlockingInvoked.trigger();
                return super.getBatchBlocking();
            }
        };

    final TestKafkaConsumerThread testThread =
        new TestKafkaConsumerThread(mockConsumer, unassignedPartitionsQueue, new Handover());
    testThread.start();

    getBatchBlockingInvoked.await();
    testThread.shutdown();
    testThread.join();
}
Example #29
Source File: ParallelWebKafkaConsumer.java From kafka-webview with MIT License
private ConsumerState getConsumerState(final KafkaConsumer kafkaConsumer) {
    final List<PartitionOffset> offsets = new ArrayList<>();

    for (final TopicPartition topicPartition : getAllPartitions(kafkaConsumer)) {
        final long offset = kafkaConsumer.position(topicPartition);
        offsets.add(new PartitionOffset(topicPartition.partition(), offset));
    }
    return new ConsumerState(clientConfig.getTopicConfig().getTopicName(), offsets);
}
Example #30
Source File: KafkaConsumerOffsets.java From microservices-testing-examples with MIT License
public KafkaConsumerOffsets(String host, Integer port, String groupId) {
    Properties properties = new Properties();
    properties.put(BOOTSTRAP_SERVERS_CONFIG, host + ":" + port);
    properties.put(KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);
    properties.put(VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);
    properties.put(GROUP_ID_CONFIG, groupId);
    this.consumer = new KafkaConsumer<>(properties);
}