Java Code Examples for org.apache.kafka.clients.consumer.KafkaConsumer#commitSync()
The following examples show how to use org.apache.kafka.clients.consumer.KafkaConsumer#commitSync(). Each example is drawn from an open-source project; the source file, project, and license are noted above each example.
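For orientation, here is a minimal sketch, not taken from any of the projects on this page: the topic name, group id, and broker address are placeholders, and imports are omitted as in the examples that follow. It shows the two commitSync() variants these examples use: the no-argument form, which synchronously commits the offsets returned by the last poll(), and the overload taking a Map<TopicPartition, OffsetAndMetadata>, which commits explicit positions. Note that the committed offset is the position of the next record to read, hence the + 1.

// Minimal sketch; configuration values are placeholders.
Properties props = new Properties();
props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");
props.put(ConsumerConfig.GROUP_ID_CONFIG, "demo-group");
props.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, "false"); // required for manual commits to matter
props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());
props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());

KafkaConsumer<String, String> consumer = new KafkaConsumer<>(props);
consumer.subscribe(Collections.singletonList("my-topic"));
try {
    while (true) {
        ConsumerRecords<String, String> records = consumer.poll(Duration.ofMillis(1000));
        for (ConsumerRecord<String, String> record : records) {
            // process the record here
        }

        // Variant 1: commit the offsets returned by the last poll() in one call.
        consumer.commitSync();

        // Variant 2 (an alternative to variant 1, shown side by side here; committing
        // both ways is redundant but harmless): commit an explicit position per
        // partition, where the position is the offset of the NEXT record to read.
        for (TopicPartition partition : records.partitions()) {
            List<ConsumerRecord<String, String>> partitionRecords = records.records(partition);
            long lastOffset = partitionRecords.get(partitionRecords.size() - 1).offset();
            consumer.commitSync(Collections.singletonMap(partition,
                    new OffsetAndMetadata(lastOffset + 1)));
        }
    }
} finally {
    consumer.close();
}

Because commitSync() blocks until the broker acknowledges the commit (and can throw, for example, CommitFailedException after a group rebalance), most of the examples below commit once per poll or per batch rather than per record.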
Example 1
Source File: OffsetCommitSyncPartition.java From BigData-In-Practice with Apache License 2.0
public static void main(String[] args) {
    KafkaConsumer<String, String> consumer = new ConsumerFactory<String, String>().create();
    try {
        while (true) {
            ConsumerRecords<String, String> records = consumer.poll(Duration.ofMillis(1000));
            for (TopicPartition partition : records.partitions()) {
                List<ConsumerRecord<String, String>> partitionRecords = records.records(partition);
                for (ConsumerRecord<String, String> record : partitionRecords) {
                    // do some logical processing.
                }
                long lastConsumedOffset = partitionRecords.get(partitionRecords.size() - 1).offset();
                consumer.commitSync(Collections.singletonMap(partition,
                        new OffsetAndMetadata(lastConsumedOffset + 1)));
            }
        }
    } finally {
        consumer.close();
    }
}
Example 2
Source File: OffsetCommitSyncBatch.java From kafka_book_demo with Apache License 2.0
public static void main(String[] args) {
    Properties props = initConfig();
    KafkaConsumer<String, String> consumer = new KafkaConsumer<>(props);
    consumer.subscribe(Arrays.asList(topic));
    final int minBatchSize = 200;
    List<ConsumerRecord> buffer = new ArrayList<>();
    while (running.get()) {
        ConsumerRecords<String, String> records = consumer.poll(1000);
        for (ConsumerRecord<String, String> record : records) {
            buffer.add(record);
        }
        if (buffer.size() >= minBatchSize) {
            // do some logical processing with buffer.
            consumer.commitSync();
            buffer.clear();
        }
    }
}
Example 3
Source File: OffsetCommitSync.java From kafka_book_demo with Apache License 2.0
public static void main(String[] args) {
    Properties props = initConfig();
    KafkaConsumer<String, String> consumer = new KafkaConsumer<>(props);
    consumer.subscribe(Arrays.asList(topic));
    try {
        while (running.get()) {
            ConsumerRecords<String, String> records = consumer.poll(1000);
            for (ConsumerRecord<String, String> record : records) {
                // do some logical processing.
            }
            consumer.commitSync();
        }
    } finally {
        consumer.close();
    }
}
Example 4
Source File: SimulateResultService.java From SkaETL with Apache License 2.0
public List<SimulateData> readOutPut(String bootStrapServers, String maxRecords, String windowTime) {
    KafkaConsumer kafkaConsumer = kafkaUtils.kafkaConsumer("latest", bootStrapServers, "simulate");
    log.info("Subscribe Topic for {}", SIMULATE_OUTPUT);
    kafkaConsumer.subscribe(Arrays.asList(SIMULATE_OUTPUT), new Rebalancer());
    List<SimulateData> res = new ArrayList<>();
    long start = System.currentTimeMillis();
    try {
        while (checkWindow(start, Long.valueOf(windowTime), res.size(), Long.valueOf(maxRecords))) {
            ConsumerRecords<String, SimulateData> records = kafkaConsumer.poll(100);
            for (ConsumerRecord<String, SimulateData> record : records) {
                res.add(record.value());
            }
            log.info("Number item for read OutPut {}", res.size());
            kafkaConsumer.commitSync();
        }
    } catch (WakeupException e) {
        // Ignore exception if closing
        throw e;
    } catch (RuntimeException re) {
        log.error("RuntimeException {}", re);
    } finally {
        kafkaConsumer.close();
    }
    return res;
}
Example 5
Source File: OffsetCommitSyncBatch.java From BigData-In-Practice with Apache License 2.0
public static void main(String[] args) {
    KafkaConsumer<String, String> consumer = new ConsumerFactory<String, String>().create();
    final int minBatchSize = 200;
    List<ConsumerRecord> buffer = new ArrayList<>();
    while (true) {
        ConsumerRecords<String, String> records = consumer.poll(Duration.ofMillis(1000));
        for (ConsumerRecord<String, String> record : records) {
            buffer.add(record);
            System.out.println(record.offset() + " : " + record.value());
        }
        if (buffer.size() >= minBatchSize) {
            // do some logical processing with buffer.
            consumer.commitSync();
            buffer.clear();
        }
    }
}
Example 6
Source File: ConsumerManual.java From javatech with Creative Commons Attribution Share Alike 4.0 International
public static void main(String[] args) {
    Properties props = new Properties();
    props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, HOST);
    props.put(ConsumerConfig.GROUP_ID_CONFIG, "test");
    props.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, "false");
    props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG,
            "org.apache.kafka.common.serialization.StringDeserializer");
    props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG,
            "org.apache.kafka.common.serialization.StringDeserializer");
    KafkaConsumer<String, String> consumer = new KafkaConsumer<>(props);
    consumer.subscribe(Arrays.asList("t1", "t2"));
    final int minBatchSize = 200;
    List<ConsumerRecord<String, String>> buffer = new ArrayList<>();
    while (true) {
        ConsumerRecords<String, String> records = consumer.poll(100);
        for (ConsumerRecord<String, String> record : records) {
            buffer.add(record);
        }
        if (buffer.size() >= minBatchSize) {
            // logical processing, e.g. saving the batch to a database
            consumer.commitSync();
            buffer.clear();
        }
    }
}
Example 7
Source File: Kafka0_10ConsumerLoader.java From datacollector with Apache License 2.0
private void setOffsetsByTimestamp(String topic, KafkaConsumer kafkaAuxiliaryConsumer) {
    // Build map of topic partitions and the timestamp to use when searching the offset for each
    // partition (same timestamp for all the partitions)
    List<PartitionInfo> partitionInfoList = kafkaAuxiliaryConsumer.partitionsFor(topic);

    if (partitionInfoList != null) {
        Map<TopicPartition, Long> partitionsAndTimestampMap = partitionInfoList.stream()
                .map(e -> new TopicPartition(topic, e.partition()))
                .collect(Collectors.toMap(e -> e, e -> timestampToSearchOffsets));

        // Get offsets by timestamp using the previously built map and commit them to the
        // corresponding partitions
        if (!partitionsAndTimestampMap.isEmpty()) {
            Map<TopicPartition, OffsetAndTimestamp> partitionsOffsets =
                    kafkaAuxiliaryConsumer.offsetsForTimes(partitionsAndTimestampMap);
            if (partitionsOffsets != null && !partitionsOffsets.isEmpty()) {
                Map<TopicPartition, OffsetAndMetadata> offsetsToCommit = partitionsOffsets.entrySet().stream()
                        .filter(entry -> entry.getKey() != null && entry.getValue() != null)
                        .collect(Collectors.toMap(
                                entry -> entry.getKey(),
                                entry -> new OffsetAndMetadata(entry.getValue().offset())));

                if (!offsetsToCommit.isEmpty()) {
                    kafkaAuxiliaryConsumer.commitSync(offsetsToCommit);
                }
            }
        }
    }
}
Example 8
Source File: EventuateKafkaConsumer.java From light-eventuate-4j with Apache License 2.0
private void maybeCommitOffsets(KafkaConsumer<String, String> consumer, KafkaMessageProcessor processor) {
    Map<TopicPartition, OffsetAndMetadata> offsetsToCommit = processor.offsetsToCommit();
    if (!offsetsToCommit.isEmpty()) {
        logger.debug("Committing offsets {} {}", subscriberId, offsetsToCommit);
        consumer.commitSync(offsetsToCommit);
        logger.debug("Committed offsets {}", subscriberId);
        processor.noteOffsetsCommitted(offsetsToCommit);
    }
}
Example 9
Source File: KafkaAvroSerDesWithKafkaServerTest.java From registry with Apache License 2.0
private ConsumerRecords<String, Object> consumeMessage(String topicName, String bootstrapServers, String consumerGroup) {
    Map<String, Object> props = new HashMap<>();
    props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, bootstrapServers);
    props.putAll(SCHEMA_REGISTRY_TEST_SERVER_CLIENT_WRAPPER.exportClientConf(true));
    props.put(ConsumerConfig.GROUP_ID_CONFIG, consumerGroup);
    props.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, "true");
    props.put(ConsumerConfig.AUTO_COMMIT_INTERVAL_MS_CONFIG, "1000");
    props.put(ConsumerConfig.SESSION_TIMEOUT_MS_CONFIG, "30000");
    props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, KafkaAvroDeserializer.class.getName());
    props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, KafkaAvroDeserializer.class.getName());

    KafkaConsumer<String, Object> consumer = new KafkaConsumer<>(props);
    List<PartitionInfo> partitionInfos = consumer.partitionsFor(topicName);
    Collection<TopicPartition> partitions = new ArrayList<>();
    for (PartitionInfo partitionInfo : partitionInfos) {
        partitions.add(new TopicPartition(partitionInfo.topic(), partitionInfo.partition()));
    }
    LOG.info("partitions [{}]", partitions);
    LOG.info("subscribed topics: [{}] ", consumer.listTopics());

    consumer.assign(partitions);
    consumer.seekToBeginning(partitions);

    ConsumerRecords<String, Object> consumerRecords = null;
    int ct = 0;
    while (ct++ < 100 && (consumerRecords == null || consumerRecords.isEmpty())) {
        LOG.info("Polling for consuming messages");
        consumerRecords = consumer.poll(Duration.ofMillis(500));
    }
    consumer.commitSync();
    consumer.close();

    return consumerRecords;
}
Example 10
Source File: AtLeastOnceConsumer.java From javabase with Apache License 2.0
private static void processRecords(KafkaConsumer<String, String> consumer) throws InterruptedException {
    while (true) {
        ConsumerRecords<String, String> records = consumer.poll(100);
        long lastOffset = 0;
        for (ConsumerRecord<String, String> record : records) {
            System.out.printf("\n\roffset = %d, key = %s, value = %s", record.offset(), record.key(), record.value());
            lastOffset = record.offset();
        }
        System.out.println("lastOffset read: " + lastOffset);
        process();
        // If the line below is commented out, the consumer never commits offsets
        // and will re-consume the same messages after a restart.
        consumer.commitSync();
    }
}
Example 11
Source File: OffsetCommitSyncSingle.java From BigData-In-Practice with Apache License 2.0
public static void main(String[] args) {
    KafkaConsumer<String, String> consumer = new ConsumerFactory<String, String>().create();
    try {
        while (true) {
            ConsumerRecords<String, String> records = consumer.poll(Duration.ofMillis(1000));
            for (ConsumerRecord<String, String> record : records) {
                // do some logical processing.
                long offset = record.offset();
                TopicPartition partition = new TopicPartition(record.topic(), record.partition());
                consumer.commitSync(Collections.singletonMap(partition,
                        new OffsetAndMetadata(offset + 1)));
            }
        }
        // TopicPartition tp1 = new TopicPartition(topic, 0);
        // TopicPartition tp2 = new TopicPartition(topic, 1);
        // TopicPartition tp3 = new TopicPartition(topic, 2);
        // TopicPartition tp4 = new TopicPartition(topic, 3);
        // System.out.println(consumer.committed(tp1) + " : " + consumer.position(tp1));
        // System.out.println(consumer.committed(tp2) + " : " + consumer.position(tp2));
        // System.out.println(consumer.committed(tp3) + " : " + consumer.position(tp3));
        // System.out.println(consumer.committed(tp4) + " : " + consumer.position(tp4));
    } finally {
        consumer.close();
    }
}
Example 12
Source File: KafkaConsumerExample.java From client-examples with Apache License 2.0
public static void main(String[] args) {
    KafkaConsumerConfig config = KafkaConsumerConfig.fromEnv();
    Properties props = KafkaConsumerConfig.createProperties(config);
    int receivedMsgs = 0;

    if (System.getenv("JAEGER_SERVICE_NAME") != null) {
        Tracer tracer = Configuration.fromEnv().getTracer();
        GlobalTracer.registerIfAbsent(tracer);
        props.put(ConsumerConfig.INTERCEPTOR_CLASSES_CONFIG, TracingConsumerInterceptor.class.getName());
    }

    boolean commit = !Boolean.parseBoolean(config.getEnableAutoCommit());
    KafkaConsumer consumer = new KafkaConsumer(props);
    consumer.subscribe(Collections.singletonList(config.getTopic()));

    while (receivedMsgs < config.getMessageCount()) {
        ConsumerRecords<String, String> records = consumer.poll(Long.MAX_VALUE);
        for (ConsumerRecord<String, String> record : records) {
            log.info("Received message:");
            log.info("\tpartition: {}", record.partition());
            log.info("\toffset: {}", record.offset());
            log.info("\tvalue: {}", record.value());
            receivedMsgs++;
        }
        if (commit) {
            consumer.commitSync();
        }
    }
}
Example 13
Source File: NativeKafkaWithAvroDecoderTest.java From hermes with Apache License 2.0
@Test
public void testNative() throws IOException, InterruptedException, ExecutionException {
    final String topic = "kafka.SimpleAvroTopic";
    int msgNum = 200;
    final CountDownLatch countDown = new CountDownLatch(msgNum);

    Properties producerProps = new Properties();
    producerProps.put("bootstrap.servers", "");

    // Avro Decoder/Encoder
    CachedSchemaRegistryClient schemaRegistry = new CachedSchemaRegistryClient("",
            AbstractKafkaAvroSerDeConfig.MAX_SCHEMAS_PER_SUBJECT_DEFAULT);
    Map<String, String> configs = new HashMap<String, String>();
    configs.put("schema.registry.url", "");

    KafkaAvroSerializer avroKeySerializer = new KafkaAvroSerializer();
    avroKeySerializer.configure(configs, true);
    KafkaAvroSerializer avroValueSerializer = new KafkaAvroSerializer();
    avroValueSerializer.configure(configs, false);

    Map<String, String> deserializerConfigs = new HashMap<String, String>();
    deserializerConfigs.put("specific.avro.reader", Boolean.TRUE.toString());
    deserializerConfigs.put("schema.registry.url", "");
    KafkaAvroDeserializer avroKeyDeserializer = new KafkaAvroDeserializer(schemaRegistry, deserializerConfigs);
    avroKeyDeserializer.configure(configs, true);
    KafkaAvroDeserializer avroValueDeserializer = new KafkaAvroDeserializer(schemaRegistry, deserializerConfigs);
    avroValueDeserializer.configure(configs, false);

    // Consumer
    final Properties consumerProps = new Properties();
    consumerProps.put("bootstrap.servers", "");
    consumerProps.put("group.id", "GROUP_" + topic);

    final List<Object> actualResult = new ArrayList<Object>();
    final List<Object> expectedResult = new ArrayList<Object>();

    final KafkaConsumer<Object, Object> consumer = new KafkaConsumer<Object, Object>(consumerProps,
            avroKeyDeserializer, avroValueDeserializer);
    consumer.subscribe(Arrays.asList(topic));

    class KafkaConsumerThread implements Runnable {

        private final AtomicBoolean closed = new AtomicBoolean(false);

        public void run() {
            try {
                while (!closed.get()) {
                    ConsumerRecords<Object, Object> records = consumer.poll(100);
                    for (ConsumerRecord<Object, Object> consumerRecord : records) {
                        System.out.println("received: " + consumerRecord.value());
                        actualResult.add(consumerRecord.value());
                        countDown.countDown();
                    }
                }
            } catch (WakeupException e) {
                if (!closed.get()) {
                    throw e;
                }
            } finally {
                consumer.commitSync();
                consumer.close();
            }
        }

        public void shutdown() {
            closed.set(true);
            consumer.wakeup();
        }
    }

    KafkaConsumerThread thread = new KafkaConsumerThread();
    new Thread(thread).start();

    KafkaProducer<Object, Object> producer = new KafkaProducer<Object, Object>(producerProps,
            avroKeySerializer, avroValueSerializer);
    int i = 0;
    while (i++ < msgNum) {
        ProducerRecord<Object, Object> data = new ProducerRecord<Object, Object>(topic, null,
                (Object) KafkaAvroTest.generateEvent());
        Future<RecordMetadata> send = producer.send(data);
        send.get();
        if (send.isDone()) {
            System.out.println("sending: " + data.value());
            expectedResult.add(data.value());
        }
    }

    countDown.await();
    thread.shutdown();
    producer.close();

    Assert.assertEquals(expectedResult.size(), actualResult.size());
}
Example 14
Source File: ParallelWebKafkaConsumer.java From kafka-webview with MIT License
private void commit(final KafkaConsumer<?, ?> kafkaConsumer) {
    kafkaConsumer.commitSync();
}
Example 15
Source File: KafkaOperationsTest.java From kafka-webview with MIT License
/**
 * Helper method to consume records from a topic.
 * @param topics topics to consume from.
 * @param consumerId Consumer's consumerId
 * @param consumerPrefix Any consumer Id prefix.
 */
private KafkaConsumer<String, String> consumeFromTopics(final Collection<String> topics, final String consumerId, final String consumerPrefix) {
    // Create cluster config.
    final ClusterConfig clusterConfig = ClusterConfig.newBuilder()
        .withBrokerHosts(sharedKafkaTestResource.getKafkaConnectString())
        .build();

    // Create Deserializer Config
    final DeserializerConfig deserializerConfig = DeserializerConfig.newBuilder()
        .withKeyDeserializerClass(KafkaConsumerFactoryTest.TestDeserializer.class)
        .withKeyDeserializerOption("key.option", "key.value")
        .withKeyDeserializerOption("key.option2", "key.value2")

        // Attempt to override a real setting, it should get filtered
        .withKeyDeserializerOption(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, "MadeUpValue")
        .withValueDeserializerClass(KafkaConsumerFactoryTest.TestDeserializer.class)
        .withValueDeserializerOption("value.option", "value.value")
        .withValueDeserializerOption("value.option2", "value.value2")

        // Attempt to override a real setting, it should get filtered
        .withValueDeserializerOption(CommonClientConfigs.BOOTSTRAP_SERVERS_CONFIG, "MadeUpValue")
        .build();

    // Create Topic Config
    final String topic = topics.iterator().next();
    final org.sourcelab.kafka.webview.ui.manager.kafka.config.TopicConfig topicConfig =
        new org.sourcelab.kafka.webview.ui.manager.kafka.config.TopicConfig(clusterConfig, deserializerConfig, topic);

    // Create FilterConfig
    final FilterConfig filterConfig = FilterConfig.withNoFilters();

    // Create ClientConfig, instructing to start from head.
    final ClientConfig clientConfig = ClientConfig.newBuilder()
        .withConsumerId(consumerId)
        .withFilterConfig(filterConfig)
        .withAllPartitions()
        .withStartingPosition(StartingPosition.newHeadPosition())
        .withMaxResultsPerPartition(100)
        .withTopicConfig(topicConfig)
        .build();

    // Create consumer and consume the entries, storing state in Kafka.
    final KafkaConsumerFactory kafkaConsumerFactory =
        new KafkaConsumerFactory(new KafkaClientConfigUtil("not/used", consumerPrefix));
    final KafkaConsumer<String, String> consumer = kafkaConsumerFactory.createConsumerAndSubscribe(clientConfig);

    // Subscribe to all topics.
    consumer.unsubscribe();
    consumer.subscribe(topics);

    // Consume and commit offsets.
    // Wait for assignment to complete.
    for (int attempts = 0; attempts < 10; attempts++) {
        consumer.poll(Duration.ofMillis(1000L));
        final Set<TopicPartition> assignmentSet = consumer.assignment();
        if (!assignmentSet.isEmpty()) {
            break;
        }
    }

    // Commit offsets.
    consumer.commitSync();

    return consumer;
}
Example 16
Source File: AvroConsumerExample.java From javabase with Apache License 2.0
private static void processRecords(KafkaConsumer<String, byte[]> consumer) throws InterruptedException {
    while (true) {
        ConsumerRecords<String, byte[]> records = consumer.poll(100);
        long lastOffset = 0;
        for (ConsumerRecord<String, byte[]> record : records) {
            GenericRecord genericRecord = AvroSupport.byteArrayToData(AvroSupport.getSchema(), record.value());
            String firstName = AvroSupport.getValue(genericRecord, "firstName", String.class);
            System.out.printf("\n\roffset = %d, key = %s, value = %s", record.offset(), record.key(), firstName);
            lastOffset = record.offset();
        }
        System.out.println("lastOffset read: " + lastOffset);
        consumer.commitSync();
        Thread.sleep(500);
    }
}
Example 17
Source File: MetricsServer.java From arcusplatform with Apache License 2.0
@Override
protected void start() throws Exception {
    log.info("Starting metrics processing server...");
    Properties props = kafkaOpsConfig.toNuConsumerProperties();
    Deserializer<JsonObject> delegate = JSON.createDeserializer(JsonObject.class);
    KafkaConsumer<String, JsonObject> consumer = new KafkaConsumer<>(props, new StringDeserializer(),
            new org.apache.kafka.common.serialization.Deserializer<JsonObject>() {
        @Override
        public void configure(Map<String, ?> configs, boolean isKey) {
            // no-op
        }

        @Override
        public JsonObject deserialize(String topic, byte[] data) {
            try {
                return delegate.deserialize(data);
            } catch (Exception e) {
                log.warn("could not deserialize: {}", new String(data, StandardCharsets.UTF_8));
                return null;
            }
        }

        @Override
        public void close() {
            // no-op
        }
    });

    try {
        log.info("starting metrics consumer...");
        consumer.subscribe(ImmutableSet.of(kafkaOpsConfig.getTopicMetrics()));
        while (true) {
            ConsumerRecords<String, JsonObject> records = consumer.poll(kafkaOpsConfig.getPollingTimeoutMs());
            if (!records.isEmpty()) {
                consume(records);
            }
        }
    } catch (Exception ex) {
        log.warn("exiting abnormally: {}", ex.getMessage(), ex);
    } finally {
        consumer.commitSync();
        consumer.close();
    }
}
Example 18
Source File: KryoConsumerExample.java From kafka-examples with Apache License 2.0
public static void main(String[] args) {
    ArgumentParser parser = argParser();

    try {
        Namespace res = parser.parseArgs(args);

        /* parse args */
        String brokerList = res.getString("bootstrap.servers");
        String topic = res.getString("topic");

        Properties consumerConfig = new Properties();
        consumerConfig.put("group.id", "my-group");
        consumerConfig.put("bootstrap.servers", brokerList);
        consumerConfig.put("auto.offset.reset", "earliest");
        consumerConfig.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG,
                "kafka.examples.kryo.serde.KryoDeserializer");
        consumerConfig.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG,
                "kafka.examples.kryo.serde.KryoDeserializer");

        KafkaConsumer<String, Object> consumer = new KafkaConsumer<>(consumerConfig);
        consumer.subscribe(Collections.singletonList(topic));

        while (true) {
            ConsumerRecords<String, Object> records = consumer.poll(1000);
            for (ConsumerRecord<String, Object> record : records) {
                System.out.printf("Received Message topic =%s, partition =%s, offset = %d, key = %s, value = %s\n",
                        record.topic(), record.partition(), record.offset(), record.key(), record.value());
            }
            consumer.commitSync();
        }
    } catch (ArgumentParserException e) {
        if (args.length == 0) {
            parser.printHelp();
            System.exit(0);
        } else {
            parser.handleError(e);
            System.exit(1);
        }
    }
}
Example 19
Source File: BasicConsumerExample.java From kafka-examples with Apache License 2.0
public static void main(String[] args) {
    ArgumentParser parser = argParser();

    try {
        Namespace res = parser.parseArgs(args);

        /* parse args */
        String brokerList = res.getString("bootstrap.servers");
        String topic = res.getString("topic");
        String serializer = res.getString("serializer");

        Properties consumerConfig = new Properties();
        consumerConfig.put("group.id", "my-group");
        consumerConfig.put("bootstrap.servers", brokerList);
        consumerConfig.put("auto.offset.reset", "earliest");
        consumerConfig.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG,
                "org.apache.kafka.common.serialization.ByteArrayDeserializer");
        consumerConfig.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG,
                "org.apache.kafka.common.serialization.ByteArrayDeserializer");

        KafkaConsumer<byte[], byte[]> consumer = new KafkaConsumer<>(consumerConfig);
        consumer.subscribe(Collections.singletonList(topic));

        while (true) {
            ConsumerRecords<byte[], byte[]> records = consumer.poll(1000);
            for (ConsumerRecord<byte[], byte[]> record : records) {
                System.out.printf("Received Message topic =%s, partition =%s, offset = %d, key = %s, value = %s\n",
                        record.topic(), record.partition(), record.offset(), deserialize(record.key()),
                        deserialize(record.value()));
            }
            consumer.commitSync();
        }
    } catch (ArgumentParserException e) {
        if (args.length == 0) {
            parser.printHelp();
            System.exit(0);
        } else {
            parser.handleError(e);
            System.exit(1);
        }
    }
}
Example 20
Source File: SingerHeartbeatTest.java From singer with Apache License 2.0
void testHeartBeat() {
    int numReceivedHeartbeats = 0;

    SingerTestHelper.createSingerConfig(
        singerConfigDir,
        singerConfigConfDir,
        singerDataDir,
        "singer_test_event",
        100,
        "singer_test_event",
        "",
        heartbeatIntervalInMilliSeconds / 1000,
        heartbeatTopic);

    SingerTestHelper.createSingerConfigLConfFile(
        singerConfigConfDir,
        "singer.test2.properties",
        singerDataDir,
        "singer_test_event_2",
        100,
        "singer_test_event",
        "");

    Process singerProc = SingerTestHelper
        .startSingerProcess(singerBinaryDir, singerConfigDir, SingerHeartbeatTest.class);

    File outputDir = new File(singerDataDir);
    ThriftLoggerFactory.initialize(outputDir, 100);

    SingerTestHelper.createLogStream(singerDataDir, "singer_test_event", 100, 500);
    SingerTestHelper.createLogStream(singerDataDir, "singer_test_event_2", 100, 500);

    SingerOutputRetriever outputRetriever = new SingerOutputRetriever(singerProc.getErrorStream());
    Thread outThread = new Thread(outputRetriever);
    outThread.start();

    try {
        Thread.sleep(20 * 1000);

        Properties properties = SingerTestHelper.createKafkaConsumerConfig();
        KafkaConsumer<byte[], byte[]> kafkaConsumer = new KafkaConsumer<>(properties);
        kafkaConsumer.subscribe(Arrays.asList(heartbeatTopic));

        SingerStatus status = null;
        for (int i = 0; i < numHeartbeats; i++) {
            Thread.sleep(heartbeatIntervalInMilliSeconds);
            String hostName = SingerUtils.getHostname();
            System.out.println("Fetching heartbeat messages from " + hostName + " : ");
            ConsumerRecords<byte[], byte[]> records = kafkaConsumer.poll(Duration.ofMillis(500L));
            for (ConsumerRecord<byte[], byte[]> record : records) {
                String msg = new String(record.value());
                status = new Gson().fromJson(msg, SingerStatus.class);

                if (System.currentTimeMillis() - status.getTimestamp() > heartbeatIntervalInMilliSeconds
                        || !status.hostName.equals(hostName)) {
                    System.out.println(msg);
                    status = new Gson().fromJson(msg, SingerStatus.class);
                    kafkaConsumer.commitSync();
                }
                System.out.println(msg);
                kafkaConsumer.commitSync();
                numReceivedHeartbeats++;
                assert (msg.contains("data.test"));
                assert (msg.contains("singer.test2"));
            }
        }
        kafkaConsumer.close();
    } catch (Exception e) {
        e.printStackTrace();
        assert (false);
    } finally {
        singerProc.destroy();
    }

    assert (numReceivedHeartbeats == numHeartbeats);
}