Java Code Examples for org.apache.kafka.clients.consumer.Consumer#commitSync()
The following examples show how to use org.apache.kafka.clients.consumer.Consumer#commitSync().
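Before the project examples, here is a minimal, self-contained sketch of the two common commitSync() variants: committing an explicit per-partition position, and committing everything returned by the last poll(). The bootstrap address, group id, and topic name (localhost:9092, example-group, example-topic) are placeholders, not values taken from any example below.

import java.time.Duration;
import java.util.Collections;
import java.util.Map;
import java.util.Properties;

import org.apache.kafka.clients.consumer.Consumer;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.clients.consumer.OffsetAndMetadata;
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.common.serialization.StringDeserializer;

public class CommitSyncSketch {
    public static void main(String[] args) {
        Properties props = new Properties();
        props.put("bootstrap.servers", "localhost:9092"); // placeholder address
        props.put("group.id", "example-group");           // placeholder group
        props.put("enable.auto.commit", "false");         // offsets are committed manually below
        props.put("key.deserializer", StringDeserializer.class.getName());
        props.put("value.deserializer", StringDeserializer.class.getName());

        try (Consumer<String, String> consumer = new KafkaConsumer<>(props)) {
            consumer.subscribe(Collections.singletonList("example-topic"));
            ConsumerRecords<String, String> records = consumer.poll(Duration.ofSeconds(1));
            for (ConsumerRecord<String, String> record : records) {
                System.out.printf("processing %s%n", record.value());
                // Variant 1: commit a specific position. The committed offset is the
                // position of the *next* record to read, hence offset + 1.
                consumer.commitSync(Map.of(
                        new TopicPartition(record.topic(), record.partition()),
                        new OffsetAndMetadata(record.offset() + 1)));
            }
            // Variant 2: commit the latest offsets returned by the last poll()
            // for every assigned partition in a single blocking call.
            consumer.commitSync();
        }
    }
}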
Example 1
Source File: ConsumerExample.java From pulsar with Apache License 2.0
public static void main(String[] args) {
    String topic = "persistent://public/default/test";

    Properties props = new Properties();
    props.put("bootstrap.servers", "pulsar://localhost:6650");
    props.put("group.id", "my-subscription-name");
    props.put("enable.auto.commit", "false");
    props.put("key.deserializer", IntegerDeserializer.class.getName());
    props.put("value.deserializer", StringDeserializer.class.getName());

    @SuppressWarnings("resource")
    Consumer<Integer, String> consumer = new KafkaConsumer<>(props);
    consumer.subscribe(Arrays.asList(topic));

    while (true) {
        ConsumerRecords<Integer, String> records = consumer.poll(100);
        records.forEach(record -> {
            log.info("Received record: {}", record);
        });

        // Commit last offset
        consumer.commitSync();
    }
}
Example 2
Source File: BookListener.java From micronaut-kafka with Apache License 2.0
@Topic("all-the-books")
public void receive(
        List<Book> books,
        List<Long> offsets,
        List<Integer> partitions,
        List<String> topics,
        Consumer kafkaConsumer) { // <1>

    for (int i = 0; i < books.size(); i++) {
        // process the book
        Book book = books.get(i); // <2>

        // commit offsets
        String topic = topics.get(i);
        int partition = partitions.get(i);
        long offset = offsets.get(i); // <3>

        kafkaConsumer.commitSync(Collections.singletonMap( // <4>
                new TopicPartition(topic, partition),
                new OffsetAndMetadata(offset + 1, "my metadata")
        ));
    }
}
Example 3
Source File: ProductListener.java From micronaut-kafka with Apache License 2.0
@KafkaListener(
        offsetReset = OffsetReset.EARLIEST,
        offsetStrategy = OffsetStrategy.DISABLED // <1>
)
@Topic("awesome-products")
void receive(
        Product product,
        long offset,
        int partition,
        String topic,
        Consumer kafkaConsumer) { // <2>
    // process product record

    // commit offsets
    kafkaConsumer.commitSync(Collections.singletonMap( // <3>
            new TopicPartition(topic, partition),
            new OffsetAndMetadata(offset + 1, "my metadata")
    ));
}
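Both micronaut-kafka listeners commit offset + 1 rather than the offset itself: by Kafka convention, the committed offset is the position of the next record the group should read, not the last record processed. Example 4 below relies on the same convention when it searches for and commits offset + 1.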
Example 4
Source File: OffsetSource.java From kafka-backup with Apache License 2.0
public void syncGroupForOffset(TopicPartition topicPartition, long sourceOffset, long targetOffset) {
    OffsetStoreFile offsetStoreFile = topicOffsets.get(topicPartition);
    // __consumer_offsets contains the offset of the message to read next, so we
    // need to search for offset + 1; otherwise we would miss groups that have
    // already consumed up to this message
    List<String> groups = offsetStoreFile.groupForOffset(sourceOffset + 1);
    if (groups != null && groups.size() > 0) {
        for (String group : groups) {
            Map<String, Object> groupConsumerConfig = new HashMap<>(consumerConfig);
            groupConsumerConfig.put("group.id", group);
            Consumer<byte[], byte[]> consumer = new KafkaConsumer<>(groupConsumerConfig);
            consumer.assign(Collections.singletonList(topicPartition));

            // ! Target offset + 1, as we commit the offset of the "next message to read"
            OffsetAndMetadata offsetAndMetadata = new OffsetAndMetadata(targetOffset + 1);
            Map<TopicPartition, OffsetAndMetadata> offsets =
                    Collections.singletonMap(topicPartition, offsetAndMetadata);
            consumer.commitSync(offsets);
            consumer.close();

            log.debug("Committed target offset {} for group {} for topic {} partition {}",
                    (targetOffset + 1), group, topicPartition.topic(), topicPartition.partition());
        }
    }
}
Example 5
Source File: KafkaNativeSerializationApplicationTests.java From spring-cloud-stream-samples with Apache License 2.0
@Test
public void testSendReceive() {
    Map<String, Object> senderProps = KafkaTestUtils.producerProps(embeddedKafka.getEmbeddedKafka());
    senderProps.put("value.serializer", StringSerializer.class);
    DefaultKafkaProducerFactory<byte[], String> pf = new DefaultKafkaProducerFactory<>(senderProps);
    KafkaTemplate<byte[], String> template = new KafkaTemplate<>(pf, true);
    template.setDefaultTopic(INPUT_TOPIC);
    template.sendDefault("foo");

    Map<String, Object> consumerProps = KafkaTestUtils.consumerProps(GROUP_NAME, "false",
            embeddedKafka.getEmbeddedKafka());
    consumerProps.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest");
    consumerProps.put("value.deserializer", MyJsonDeserializer.class);
    DefaultKafkaConsumerFactory<byte[], Person> cf = new DefaultKafkaConsumerFactory<>(consumerProps);

    Consumer<byte[], Person> consumer = cf.createConsumer();
    consumer.subscribe(Collections.singleton(OUTPUT_TOPIC));
    ConsumerRecords<byte[], Person> records = consumer.poll(Duration.ofSeconds(10));
    consumer.commitSync();

    assertThat(records.count()).isEqualTo(1);
    assertThat(new String(records.iterator().next().value().getName())).isEqualTo("foo");
}
Example 6
Source File: KafkaConsumerHelper.java From zerocode with Apache License 2.0
public static void handleCommitSyncAsync(Consumer<Long, String> consumer,
                                         ConsumerCommonConfigs consumerCommonConfigs,
                                         ConsumerLocalConfigs consumeLocalTestProps) {
    if (consumeLocalTestProps == null) {
        LOGGER.warn("[No local test configs]-Kafka client neither did `commitAsync()` nor `commitSync()`");
        return;
    }

    Boolean effectiveCommitSync;
    Boolean effectiveCommitAsync;

    Boolean localCommitSync = consumeLocalTestProps.getCommitSync();
    Boolean localCommitAsync = consumeLocalTestProps.getCommitAsync();

    if (localCommitSync == null && localCommitAsync == null) {
        effectiveCommitSync = consumerCommonConfigs.getCommitSync();
        effectiveCommitAsync = consumerCommonConfigs.getCommitAsync();
    } else {
        effectiveCommitSync = localCommitSync;
        effectiveCommitAsync = localCommitAsync;
    }

    if (effectiveCommitSync != null && effectiveCommitSync == true) {
        consumer.commitSync();
    } else if (effectiveCommitAsync != null && effectiveCommitAsync == true) {
        consumer.commitAsync();
    } else {
        LOGGER.warn("Kafka client neither configured for `commitAsync()` nor `commitSync()`");
    }

    // --------------------------------------------------------
    // Leave this to the user to "commit" the offset explicitly
    // --------------------------------------------------------
}
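Note that when both flags are set, the if/else ordering makes this helper prefer commitSync(), which blocks and retries internally until the commit succeeds or hits an unrecoverable error; commitAsync() returns immediately and does not retry, trading commit reliability for throughput.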
Example 7
Source File: ConsumerAvroExample.java From pulsar with Apache License 2.0
public static void main(String[] args) {
    String topic = "persistent://public/default/test-avro";

    Properties props = new Properties();
    props.put("bootstrap.servers", "pulsar://localhost:6650");
    props.put("group.id", "my-subscription-name");
    props.put("enable.auto.commit", "false");
    props.put("key.deserializer", IntegerDeserializer.class.getName());
    props.put("value.deserializer", StringDeserializer.class.getName());

    AvroSchema<Bar> barSchema = AvroSchema.of(SchemaDefinition.<Bar>builder().withPojo(Bar.class).build());
    AvroSchema<Foo> fooSchema = AvroSchema.of(SchemaDefinition.<Foo>builder().withPojo(Foo.class).build());

    Bar bar = new Bar();
    bar.setField1(true);

    Foo foo = new Foo();
    foo.setField1("field1");
    foo.setField2("field2");
    foo.setField3(3);

    @SuppressWarnings("resource")
    Consumer<Foo, Bar> consumer = new KafkaConsumer<>(props, fooSchema, barSchema);
    consumer.subscribe(Arrays.asList(topic));

    while (true) {
        ConsumerRecords<Foo, Bar> records = consumer.poll(100);
        records.forEach(record -> {
            log.info("Received record: {}", record);
        });

        // Commit last offset
        consumer.commitSync();
    }
}
Example 8
Source File: LiKafkaInstrumentedConsumerImpl.java From li-apache-kafka-clients with BSD 2-Clause "Simplified" License
/**
 * Close any existing delegate client and create a new one based on the
 * latest config overrides.
 * @param abortIfExists abort the operation if the current delegate is not null
 * @return true if the delegate client was actually replaced
 */
private boolean recreateDelegate(boolean abortIfExists) {
    try (@SuppressWarnings("unused") CloseableLock swLock = new CloseableLock(delegateLock.writeLock())) {
        Set<TopicPartition> pausedPartitions = null;
        Consumer<K, V> prevConsumer = delegate;
        if (prevConsumer != null) {
            if (abortIfExists) {
                return false; // leave existing delegate as-is
            }
            pausedPartitions = prevConsumer.paused();
            delegate = null;
            try {
                try {
                    prevConsumer.commitSync(Duration.ofSeconds(30));
                } finally {
                    prevConsumer.close(Duration.ofSeconds(10));
                }
            } catch (Exception e) {
                LOG.error("error closing old delegate consumer", e);
            }
        }
        if (closing) {
            return false;
        }
        delegate = consumerFactory.create(baseConfig,
                LiKafkaClientsUtils.convertConfigMapToProperties(configOverrides));
        if (subscriptionPattern != null) {
            if (rebalanceListener != null) {
                delegate.subscribe(subscriptionPattern, rebalanceListener);
            } else {
                delegate.subscribe(subscriptionPattern);
            }
        } else if (subscribedTopics != null) {
            if (rebalanceListener != null) {
                delegate.subscribe(subscribedTopics, rebalanceListener);
            } else {
                delegate.subscribe(subscribedTopics);
            }
        } else if (assignedPartitions != null) {
            delegate.assign(assignedPartitions);
        }
        if (pausedPartitions != null && !pausedPartitions.isEmpty()) {
            // TODO - this may throw an exception if the rebalance hasn't completed; test this
            delegate.pause(pausedPartitions);
        }
        return true;
    }
}
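The commitSync(Duration) overload used here bounds how long the final commit may block (30 seconds) before the old delegate is closed, so a slow or unreachable broker cannot stall the swap indefinitely; the no-argument variant blocks for up to the consumer's default.api.timeout.ms.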
Example 9
Source File: KafkaApiTest.java From pulsar with Apache License 2.0
@Test(timeOut = 30000)
public void testSimpleProducerConsumer() throws Exception {
    String topic = "persistent://public/default/testSimpleProducerConsumer";

    Properties producerProperties = new Properties();
    producerProperties.put("bootstrap.servers", getPlainTextServiceUrl());
    producerProperties.put("key.serializer", IntegerSerializer.class.getName());
    producerProperties.put("value.serializer", StringSerializer.class.getName());
    Producer<Integer, String> producer = new KafkaProducer<>(producerProperties);

    Properties consumerProperties = new Properties();
    consumerProperties.put("bootstrap.servers", getPlainTextServiceUrl());
    consumerProperties.put("group.id", "my-subscription-name");
    consumerProperties.put("key.deserializer", IntegerDeserializer.class.getName());
    consumerProperties.put("value.deserializer", StringDeserializer.class.getName());
    consumerProperties.put("enable.auto.commit", "true");
    Consumer<Integer, String> consumer = new KafkaConsumer<>(consumerProperties);
    consumer.subscribe(Arrays.asList(topic));

    List<Long> offsets = new ArrayList<>();

    for (int i = 0; i < 10; i++) {
        RecordMetadata md = producer.send(new ProducerRecord<Integer, String>(topic, i, "hello-" + i)).get();
        offsets.add(md.offset());
        log.info("Published message at {}", Long.toHexString(md.offset()));
    }

    producer.flush();
    producer.close();

    AtomicInteger received = new AtomicInteger();
    while (received.get() < 10) {
        ConsumerRecords<Integer, String> records = consumer.poll(100);
        records.forEach(record -> {
            assertEquals(record.key().intValue(), received.get());
            assertEquals(record.value(), "hello-" + received.get());
            assertEquals(record.offset(), offsets.get(received.get()).longValue());

            received.incrementAndGet();
        });

        consumer.commitSync();
    }

    consumer.close();
}
Example 10
Source File: KafkaApiTest.java From pulsar with Apache License 2.0
@Test
public void testProducerConsumerAvroSchemaWithPulsarKafkaClient() throws Exception {
    String topic = "testProducerConsumerAvroSchemaWithPulsarKafkaClient";

    AvroSchema<Bar> barSchema = AvroSchema.of(SchemaDefinition.<Bar>builder().withPojo(Bar.class).build());
    AvroSchema<Foo> fooSchema = AvroSchema.of(SchemaDefinition.<Foo>builder().withPojo(Foo.class).build());

    Properties props = new Properties();
    props.put("bootstrap.servers", getPlainTextServiceUrl());
    props.put("group.id", "my-subscription-name");
    props.put("enable.auto.commit", "false");
    props.put("key.serializer", IntegerSerializer.class.getName());
    props.put("value.serializer", StringSerializer.class.getName());
    props.put("key.deserializer", StringDeserializer.class.getName());
    props.put("value.deserializer", StringDeserializer.class.getName());

    @Cleanup
    Consumer<Bar, Foo> consumer = new KafkaConsumer<>(props, barSchema, fooSchema);
    consumer.subscribe(Arrays.asList(topic));

    Producer<Bar, Foo> producer = new KafkaProducer<>(props, barSchema, fooSchema);

    for (int i = 0; i < 10; i++) {
        Bar bar = new Bar();
        bar.setField1(true);

        Foo foo = new Foo();
        foo.setField1("field1");
        foo.setField2("field2");
        foo.setField3(i);

        producer.send(new ProducerRecord<>(topic, bar, foo));
    }
    producer.flush();
    producer.close();

    AtomicInteger received = new AtomicInteger();
    while (received.get() < 10) {
        ConsumerRecords<Bar, Foo> records = consumer.poll(100);
        if (!records.isEmpty()) {
            records.forEach(record -> {
                Bar key = record.key();
                Assert.assertTrue(key.isField1());

                Foo value = record.value();
                Assert.assertEquals(value.getField1(), "field1");
                Assert.assertEquals(value.getField2(), "field2");
                Assert.assertEquals(value.getField3(), received.get());

                received.incrementAndGet();
            });

            consumer.commitSync();
        }
    }
}
Example 11
Source File: KafkaApiTest.java From pulsar with Apache License 2.0
@Test
public void testProducerConsumerJsonSchemaWithPulsarKafkaClient() throws Exception {
    String topic = "testProducerConsumerJsonSchemaWithPulsarKafkaClient";

    JSONSchema<Bar> barSchema = JSONSchema.of(SchemaDefinition.<Bar>builder().withPojo(Bar.class).build());
    JSONSchema<Foo> fooSchema = JSONSchema.of(SchemaDefinition.<Foo>builder().withPojo(Foo.class).build());

    Properties props = new Properties();
    props.put("bootstrap.servers", getPlainTextServiceUrl());
    props.put("group.id", "my-subscription-name");
    props.put("enable.auto.commit", "false");
    props.put("key.serializer", IntegerSerializer.class.getName());
    props.put("value.serializer", StringSerializer.class.getName());
    props.put("key.deserializer", StringDeserializer.class.getName());
    props.put("value.deserializer", StringDeserializer.class.getName());

    @Cleanup
    Consumer<Bar, Foo> consumer = new KafkaConsumer<>(props, barSchema, fooSchema);
    consumer.subscribe(Arrays.asList(topic));

    Producer<Bar, Foo> producer = new KafkaProducer<>(props, barSchema, fooSchema);

    for (int i = 0; i < 10; i++) {
        Bar bar = new Bar();
        bar.setField1(true);

        Foo foo = new Foo();
        foo.setField1("field1");
        foo.setField2("field2");
        foo.setField3(i);

        producer.send(new ProducerRecord<>(topic, bar, foo));
    }
    producer.flush();
    producer.close();

    AtomicInteger received = new AtomicInteger();
    while (received.get() < 10) {
        ConsumerRecords<Bar, Foo> records = consumer.poll(100);
        if (!records.isEmpty()) {
            records.forEach(record -> {
                Bar key = record.key();
                Assert.assertTrue(key.isField1());

                Foo value = record.value();
                Assert.assertEquals(value.getField1(), "field1");
                Assert.assertEquals(value.getField2(), "field2");
                Assert.assertEquals(value.getField3(), received.get());

                received.incrementAndGet();
            });

            consumer.commitSync();
        }
    }
}
Example 12
Source File: KafkaApiTest.java From pulsar with Apache License 2.0
@Test
public void testProducerConsumerMixedSchemaWithPulsarKafkaClient() throws Exception {
    String topic = "testProducerConsumerMixedSchemaWithPulsarKafkaClient";

    Schema<String> keySchema = new PulsarKafkaSchema<>(new StringSerializer(), new StringDeserializer());
    JSONSchema<Foo> valueSchema = JSONSchema.of(SchemaDefinition.<Foo>builder().withPojo(Foo.class).build());

    Properties props = new Properties();
    props.put("bootstrap.servers", getPlainTextServiceUrl());
    props.put("group.id", "my-subscription-name");
    props.put("enable.auto.commit", "false");
    props.put("key.serializer", IntegerSerializer.class.getName());
    props.put("value.serializer", StringSerializer.class.getName());
    props.put("key.deserializer", StringDeserializer.class.getName());
    props.put("value.deserializer", StringDeserializer.class.getName());

    @Cleanup
    Consumer<String, Foo> consumer = new KafkaConsumer<>(props, keySchema, valueSchema);
    consumer.subscribe(Arrays.asList(topic));

    Producer<String, Foo> producer = new KafkaProducer<>(props, keySchema, valueSchema);

    for (int i = 0; i < 10; i++) {
        Foo foo = new Foo();
        foo.setField1("field1");
        foo.setField2("field2");
        foo.setField3(i);

        producer.send(new ProducerRecord<>(topic, "hello" + i, foo));
    }
    producer.flush();
    producer.close();

    AtomicInteger received = new AtomicInteger();
    while (received.get() < 10) {
        ConsumerRecords<String, Foo> records = consumer.poll(100);
        if (!records.isEmpty()) {
            records.forEach(record -> {
                String key = record.key();
                Assert.assertEquals(key, "hello" + received.get());

                Foo value = record.value();
                Assert.assertEquals(value.getField1(), "field1");
                Assert.assertEquals(value.getField2(), "field2");
                Assert.assertEquals(value.getField3(), received.get());

                received.incrementAndGet();
            });

            consumer.commitSync();
        }
    }
}