Java Code Examples for org.apache.kafka.clients.consumer.ConsumerRecords#isEmpty()
The following examples show how to use org.apache.kafka.clients.consumer.ConsumerRecords#isEmpty().
Each example is drawn from an open-source project; the project, source file, and license are noted above each example.
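Before the project examples, here is a minimal, self-contained sketch of the basic pattern they all build on: poll the consumer and use isEmpty() to check whether that poll returned any records. The broker address, group id, and topic name below are placeholders, not taken from any of the projects that follow.

import java.time.Duration;
import java.util.Collections;
import java.util.Properties;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;

public class IsEmptyExample {
    public static void main(String[] args) {
        Properties props = new Properties();
        props.put("bootstrap.servers", "localhost:9092"); // placeholder broker address
        props.put("group.id", "example-group");           // placeholder group id
        props.put("key.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
        props.put("value.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");

        try (KafkaConsumer<String, String> consumer = new KafkaConsumer<>(props)) {
            consumer.subscribe(Collections.singleton("example-topic")); // placeholder topic
            ConsumerRecords<String, String> records = consumer.poll(Duration.ofSeconds(1));
            // isEmpty() is clearer than count() == 0 for "did this poll return anything?"
            if (records.isEmpty()) {
                System.out.println("No records returned by this poll");
            } else {
                for (ConsumerRecord<String, String> record : records) {
                    System.out.printf("offset=%d key=%s value=%s%n",
                        record.offset(), record.key(), record.value());
                }
            }
        }
    }
}

Note that isEmpty() only reports whether the most recent poll returned zero records; it says nothing about whether the topic itself is empty.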
Example 1
Source File: LiKafkaProducerIntegrationTest.java From li-apache-kafka-clients with BSD 2-Clause "Simplified" License
@Test
public void testNullValue() throws Exception {
    String topic = "testNullValue";
    createTopic(topic);
    try (LiKafkaProducer<String, String> producer = createProducer(null)) {
        producer.send(new ProducerRecord<>(topic, "key", null));
    }
    Properties consumerProps = new Properties();
    consumerProps.setProperty(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest");
    try (LiKafkaConsumer<String, String> consumer = createConsumer(consumerProps)) {
        consumer.subscribe(Collections.singleton(topic));
        long startMs = System.currentTimeMillis();
        ConsumerRecords<String, String> records = ConsumerRecords.empty();
        while (records.isEmpty() && System.currentTimeMillis() < startMs + 60000) {
            records = consumer.poll(Duration.ofMillis(100));
        }
        assertEquals(1, records.count());
        ConsumerRecord<String, String> record = records.iterator().next();
        assertEquals("key", record.key());
        assertNull(record.value());
    }
}
Example 2
Source File: KafkaPipeLine.java From bireme with Apache License 2.0
@Override
public ChangeSet pollChangeSet() throws BiremeException {
    ConsumerRecords<String, String> records = null;
    try {
        records = consumer.poll(POLL_TIMEOUT);
    } catch (InterruptException e) {
        // Interrupted while polling; fall through and return null below.
    }
    if (cxt.stop || records == null || records.isEmpty()) {
        return null;
    }
    KafkaCommitCallback callback = new KafkaCommitCallback();
    if (!commitCallbacks.offer(callback)) {
        String message = "Can't add CommitCallback to queue.";
        throw new BiremeException(message);
    }
    stat.recordCount.mark(records.count());
    return packRecords(records, callback);
}
Example 3
Source File: AbstractInventoryCountTests.java From spring-cloud-stream-samples with Apache License 2.0
/**
 * Consume the actual events from the output topic.
 * This implementation uses a {@link Consumer}, assuming an (embedded) Kafka broker, but may be overridden.
 * @param expectedCount the expected number of messages, known in advance; this avoids a timeout delay if all is well.
 * @return the consumed data.
 */
protected Map<ProductKey, InventoryCountEvent> consumeActualInventoryCountEvents(int expectedCount) {
    Map<ProductKey, InventoryCountEvent> inventoryCountEvents = new LinkedHashMap<>();
    int receivedCount = 0;
    while (receivedCount < expectedCount) {
        ConsumerRecords<ProductKey, InventoryCountEvent> records = KafkaTestUtils.getRecords(consumer, 1000);
        if (records.isEmpty()) {
            logger.error("No more records received. Expected {} received {}.", expectedCount, receivedCount);
            break;
        }
        receivedCount += records.count();
        for (Iterator<ConsumerRecord<ProductKey, InventoryCountEvent>> it = records.iterator(); it.hasNext(); ) {
            ConsumerRecord<ProductKey, InventoryCountEvent> consumerRecord = it.next();
            logger.debug("consumed " + consumerRecord.key().getProductCode() + " = " + consumerRecord.value().getCount());
            inventoryCountEvents.put(consumerRecord.key(), consumerRecord.value());
        }
    }
    return inventoryCountEvents;
}
Example 4
Source File: MessageListenerThread.java From core-ng-project with Apache License 2.0
private void process() {
    while (!listener.shutdown) {
        try {
            // consumer should call poll at least once every MAX_POLL_INTERVAL_MS
            ConsumerRecords<byte[], byte[]> records = consumer.poll(Duration.ofSeconds(30));
            if (records.isEmpty()) continue;
            processRecords(records);
        } catch (Throwable e) {
            if (!listener.shutdown) {
                logger.error("failed to pull message, retry in 10 seconds", e);
                Threads.sleepRoughly(Duration.ofSeconds(10));
            }
        }
    }
    if (consumer != null) {   // consumer can be null if host is not resolvable
        logger.info("close kafka consumer, name={}", getName());
        consumer.close();
    }
}
Example 5
Source File: StreamApplicationIntegrationTestHarness.java From samza with Apache License 2.0
/**
 * Read messages from the provided list of topics until {@code threshold} messages have been read or until
 * {@link #numEmptyPolls} consecutive polls return no messages.
 *
 * The default poll timeout is determined by {@link #POLL_TIMEOUT_MS} and the number of empty polls is
 * determined by {@link #numEmptyPolls}.
 *
 * @param topics the list of topics to consume from
 * @param threshold the number of messages to consume
 * @return the list of {@link ConsumerRecord}s whose size is at most {@code threshold}
 */
public List<ConsumerRecord<String, String>> consumeMessages(Collection<String> topics, int threshold) {
    int emptyPollCount = 0;
    List<ConsumerRecord<String, String>> recordList = new ArrayList<>();
    consumer.subscribe(topics);
    while (emptyPollCount < numEmptyPolls && recordList.size() < threshold) {
        ConsumerRecords<String, String> records = consumer.poll(POLL_TIMEOUT_MS);
        LOG.info("Read {} messages from topics: {}", records.count(), StringUtils.join(topics, ","));
        if (!records.isEmpty()) {
            Iterator<ConsumerRecord<String, String>> iterator = records.iterator();
            while (iterator.hasNext() && recordList.size() < threshold) {
                ConsumerRecord record = iterator.next();
                LOG.info("Read key: {} val: {} from topic: {} on partition: {}",
                    record.key(), record.value(), record.topic(), record.partition());
                recordList.add(record);
                emptyPollCount = 0;
            }
        } else {
            emptyPollCount++;
        }
    }
    return recordList;
}
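Examples 5, 6, 16, 19, and 20 all rely on a variant of this "consecutive empty polls" idiom to decide when a topic has been drained. The following is a minimal distilled sketch of that idiom, not taken from any one of these projects; the poll timeout and the give-up threshold are placeholders.

import java.time.Duration;
import java.util.ArrayList;
import java.util.List;
import org.apache.kafka.clients.consumer.Consumer;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;

public final class DrainPattern {
    // Keep polling until maxEmptyPolls consecutive polls come back empty,
    // then assume the topic is drained. Resetting the counter on any
    // non-empty poll tolerates occasional empty polls mid-stream.
    public static <K, V> List<ConsumerRecord<K, V>> drain(Consumer<K, V> consumer, int maxEmptyPolls) {
        List<ConsumerRecord<K, V>> out = new ArrayList<>();
        int emptyPolls = 0;
        while (emptyPolls < maxEmptyPolls) {
            ConsumerRecords<K, V> records = consumer.poll(Duration.ofMillis(500)); // placeholder timeout
            if (records.isEmpty()) {
                emptyPolls++;       // no data this round; count toward the give-up threshold
            } else {
                emptyPolls = 0;     // got data; reset the counter
                records.forEach(out::add);
            }
        }
        return out;
    }
}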
Example 6
Source File: TestStreamProcessor.java From samza with Apache License 2.0
/**
 * Consumes data from the topic until there are no new messages for a while
 * and asserts that the number of consumed messages is as expected.
 */
@SuppressWarnings("unchecked")
private void verifyNumMessages(KafkaConsumer consumer, String topic, int expectedNumMessages) {
    consumer.subscribe(Collections.singletonList(topic));
    int count = 0;
    int emptyPollCount = 0;
    while (count < expectedNumMessages && emptyPollCount < 5) {
        ConsumerRecords records = consumer.poll(5000);
        if (!records.isEmpty()) {
            for (ConsumerRecord record : (Iterable<ConsumerRecord>) records) {
                Assert.assertEquals(new String((byte[]) record.value()), String.valueOf(count));
                count++;
            }
        } else {
            emptyPollCount++;
        }
    }
    Assert.assertEquals(count, expectedNumMessages);
}
Example 7
Source File: AGenericConsumerHandler.java From SO with BSD 2-Clause "Simplified" License
/**
 * Runnable interface implementation.<BR/>
 */
@Override
public void run() {
    log.debug("The handler:{} thread started.", id);
    try {
        subscribe(getTopicList());
        while (!closed.get()) {
            ConsumerRecords<String, String> records = getMessage();
            // Check for null before touching the records to avoid a NullPointerException.
            if (records == null || records.isEmpty()) continue;
            log.debug("records count: {}", records.count());
            handle(records);
        }
    } catch (WakeupException e) {
        log.error(e.getMessage());
        // Ignore exception if closing
        if (!closed.get()) throw e;
    } finally {
        this.close();
    }
    log.debug("The handler:{} thread ended.", id);
}
Example 8
Source File: ProcessingServiceBackend.java From java-11-examples with Apache License 2.0
public void start() {
    Collection<String> topics = Collections.singletonList(TOPIC_SERVICE_REQUESTS);
    this.consumer.subscribe(topics);
    LOG.info("Waiting for requests {} ...", serviceId);
    this.running = true;
    while (running) {
        ConsumerRecords<String, Bytes> records = consumer.poll(Duration.ofMillis(10));
        if (!records.isEmpty()) {
            for (ConsumerRecord<String, Bytes> record : records) {
                try {
                    ServiceRequest request = dataMapper.deserialize(record.value(), ServiceRequest.class);
                    LOG.info("Received Request: {}:{}:{}", record.key(), request.getClientId(), request.getTaskId());
                    ServiceResponse response = new ServiceResponse(request.getTaskId(), request.getClientId(),
                        request.getData(), "response:" + request.getData());
                    Bytes bytes = dataMapper.serialize(response);
                    ProducerRecord<String, Bytes> recordReply =
                        new ProducerRecord<>(TOPIC_SERVICE_RESPONSES, response.getTaskId(), bytes);
                    producer.send(recordReply);
                    LOG.info("Response has been sent!");
                } catch (IOException e) {
                    LOG.error("Exception: ", e);
                }
            }
        }
    }
    LOG.info("done {}.", serviceId);
}
Example 9
Source File: KafkaSinkTester.java From pulsar with Apache License 2.0
@Override
public void validateSinkResult(Map<String, String> kvs) {
    Iterator<Map.Entry<String, String>> kvIter = kvs.entrySet().iterator();
    while (kvIter.hasNext()) {
        ConsumerRecords<String, String> records = kafkaConsumer.poll(1000);
        log.info("Received {} records from kafka topic {}", records.count(), kafkaTopicName);
        if (records.isEmpty()) {
            continue;
        }
        Iterator<ConsumerRecord<String, String>> recordsIter = records.iterator();
        while (recordsIter.hasNext() && kvIter.hasNext()) {
            ConsumerRecord<String, String> consumerRecord = recordsIter.next();
            Map.Entry<String, String> expectedRecord = kvIter.next();
            assertEquals(expectedRecord.getKey(), consumerRecord.key());
            assertEquals(expectedRecord.getValue(), consumerRecord.value());
        }
    }
}
Example 10
Source File: AbstractCdcTest.java From light-eventuate-4j with Apache License 2.0
public void waitForEventInKafka(KafkaConsumer<String, String> consumer, String entityId,
        LocalDateTime deadline) throws InterruptedException {
    while (LocalDateTime.now().isBefore(deadline)) {
        long millis = ChronoUnit.MILLIS.between(LocalDateTime.now(), deadline);
        ConsumerRecords<String, String> records = consumer.poll(millis);
        if (!records.isEmpty()) {
            for (ConsumerRecord<String, String> record : records) {
                if (record.key().equals(entityId)) {
                    return;
                }
            }
        }
    }
    throw new RuntimeException("entity not found: " + entityId);
}
Example 11
Source File: CodecEndpoint.java From quarkus with Apache License 2.0
@GET
@Path("/pets")
public Pet getPet() {
    final ConsumerRecords<String, Pet> records = petConsumer.poll(Duration.ofMillis(60000));
    if (records.isEmpty()) {
        return null;
    }
    return records.iterator().next().value();
}
Example 12
Source File: KafkaConsumerManager.java From quarkus with Apache License 2.0
public String receive() {
    final ConsumerRecords<Integer, String> records = consumer.poll(Duration.ofMillis(60000));
    if (records.isEmpty()) {
        return null;
    }
    return records.iterator().next().value();
}
Example 13
Source File: CvoConsumerHandler.java From SO with BSD 2-Clause "Simplified" License
/**
 * handle method.<BR/>
 *
 * @param records ConsumerRecords
 */
@Override
public void handle(ConsumerRecords<String, String> records) {
    if (!records.isEmpty()) {
        for (ConsumerRecord<String, String> record : records) {
            handle(record);
        }
    }
}
Example 14
Source File: CaseController.java From skywalking with Apache License 2.0
@Override
public void run() {
    Properties consumerProperties = new Properties();
    consumerProperties.put("bootstrap.servers", bootstrapServers);
    consumerProperties.put("group.id", "testGroup2");
    consumerProperties.put("enable.auto.commit", "true");
    consumerProperties.put("auto.commit.interval.ms", "1000");
    consumerProperties.put("auto.offset.reset", "earliest");
    consumerProperties.put("key.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
    consumerProperties.put("value.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
    KafkaConsumer<String, String> consumer = new KafkaConsumer<>(consumerProperties);
    consumer.subscribe(topicPattern, new NoOpConsumerRebalanceListener());
    int i = 0;
    while (i++ <= 10) {
        try {
            Thread.sleep(1 * 1000);
        } catch (InterruptedException e) {
        }
        ConsumerRecords<String, String> records = consumer.poll(100);
        if (!records.isEmpty()) {
            for (ConsumerRecord<String, String> record : records) {
                logger.info("header: {}", new String(record.headers()
                    .headers("TEST")
                    .iterator()
                    .next()
                    .value()));
                logger.info("offset = {}, key = {}, value = {}", record.offset(), record.key(), record.value());
            }
            break;
        }
    }
    consumer.close();
}
Example 15
Source File: LiKafkaConsumerIntegrationTest.java From li-apache-kafka-clients with BSD 2-Clause "Simplified" License
private void verifyMessagesAfterSeek(LiKafkaConsumer<String, String> consumer, List<Long> expectedOffsets) {
    ConsumerRecords<String, String> records;
    for (long expectedOffset : expectedOffsets) {
        records = null;
        while (records == null || records.isEmpty()) {
            records = consumer.poll(10);
        }
        assertEquals(records.count(), 1, "Should return one message");
        assertEquals(records.iterator().next().offset(), expectedOffset,
            "Message offset should be " + expectedOffset);
    }
}
Example 16
Source File: DoctorKafkaActionsServlet.java From doctorkafka with Apache License 2.0
private List<ConsumerRecord<byte[], byte[]>> retrieveActionReportMessages() {
    DoctorKafkaConfig doctorKafkaConfig = DoctorKafkaMain.doctorKafka.getDoctorKafkaConfig();
    String zkUrl = doctorKafkaConfig.getBrokerstatsZkurl();
    String actionReportTopic = doctorKafkaConfig.getActionReportTopic();
    Properties properties = OperatorUtil.createKafkaConsumerProperties(zkUrl,
        OPERATOR_ACTIONS_CONSUMER_GROUP, doctorKafkaConfig.getActionReportProducerSecurityProtocol(),
        doctorKafkaConfig.getActionReportProducerSslConfigs());
    KafkaConsumer<byte[], byte[]> consumer = new KafkaConsumer<>(properties);

    TopicPartition operatorReportTopicPartition = new TopicPartition(actionReportTopic, 0);
    List<TopicPartition> tps = new ArrayList<>();
    tps.add(operatorReportTopicPartition);
    consumer.assign(tps);

    Map<TopicPartition, Long> beginOffsets = consumer.beginningOffsets(tps);
    Map<TopicPartition, Long> endOffsets = consumer.endOffsets(tps);
    for (TopicPartition tp : endOffsets.keySet()) {
        long numMessages = endOffsets.get(tp) - beginOffsets.get(tp);
        LOG.info("{} : offsets [{}, {}], num messages : {}",
            tp, beginOffsets.get(tp), endOffsets.get(tp), numMessages);
        consumer.seek(tp, Math.max(beginOffsets.get(tp), endOffsets.get(tp) - NUM_MESSAGES));
    }

    ConsumerRecords<byte[], byte[]> records = consumer.poll(CONSUMER_POLL_TIMEOUT_MS);
    List<ConsumerRecord<byte[], byte[]>> recordList = new ArrayList<>();
    while (!records.isEmpty()) {
        for (ConsumerRecord<byte[], byte[]> record : records) {
            recordList.add(record);
        }
        records = consumer.poll(CONSUMER_POLL_TIMEOUT_MS);
    }
    LOG.info("Read {} messages", recordList.size());
    return recordList;
}
Example 17
Source File: OrchestrationServiceConsumerHandler.java From SO with BSD 2-Clause "Simplified" License
/**
 * handle method.<BR/>
 *
 * @param records ConsumerRecords
 */
@Override
public void handle(ConsumerRecords<String, String> records) {
    if (!records.isEmpty()) {
        for (ConsumerRecord<String, String> record : records) {
            handle(record);
        }
    }
}
Example 18
Source File: KafkaApiTest.java From pulsar with Apache License 2.0
@Test
public void testProducerConsumerMixedSchemaWithPulsarKafkaClient() throws Exception {
    String topic = "testProducerConsumerMixedSchemaWithPulsarKafkaClient";

    Schema<String> keySchema = new PulsarKafkaSchema<>(new StringSerializer(), new StringDeserializer());
    JSONSchema<Foo> valueSchema = JSONSchema.of(SchemaDefinition.<Foo>builder().withPojo(Foo.class).build());

    Properties props = new Properties();
    props.put("bootstrap.servers", getPlainTextServiceUrl());
    props.put("group.id", "my-subscription-name");
    props.put("enable.auto.commit", "false");
    props.put("key.serializer", IntegerSerializer.class.getName());
    props.put("value.serializer", StringSerializer.class.getName());
    props.put("key.deserializer", StringDeserializer.class.getName());
    props.put("value.deserializer", StringDeserializer.class.getName());

    @Cleanup
    Consumer<String, Foo> consumer = new KafkaConsumer<>(props, keySchema, valueSchema);
    consumer.subscribe(Arrays.asList(topic));

    Producer<String, Foo> producer = new KafkaProducer<>(props, keySchema, valueSchema);
    for (int i = 0; i < 10; i++) {
        Foo foo = new Foo();
        foo.setField1("field1");
        foo.setField2("field2");
        foo.setField3(i);
        producer.send(new ProducerRecord<>(topic, "hello" + i, foo));
    }
    producer.flush();
    producer.close();

    AtomicInteger received = new AtomicInteger();
    while (received.get() < 10) {
        ConsumerRecords<String, Foo> records = consumer.poll(100);
        if (!records.isEmpty()) {
            records.forEach(record -> {
                String key = record.key();
                Assert.assertEquals(key, "hello" + received.get());
                Foo value = record.value();
                Assert.assertEquals(value.getField1(), "field1");
                Assert.assertEquals(value.getField2(), "field2");
                Assert.assertEquals(value.getField3(), received.get());
                received.incrementAndGet();
            });
            consumer.commitSync();
        }
    }
}
Example 19
Source File: OffsetKafkaStore.java From light-eventuate-4j with Apache License 2.0
public Optional<BinlogFileOffset> getLastBinlogFileOffset() {
    // Retry up to 5 times; each attempt reads the history topic until N consecutive empty polls.
    for (int i = 0; i < 5; i++) {
        try (KafkaConsumer<String, String> consumer = createConsumer()) {
            consumer.partitionsFor(dbHistoryTopicName);
            consumer.subscribe(Arrays.asList(dbHistoryTopicName));
            int count = N;
            BinlogFileOffset result = null;
            boolean lastRecordFound = false;
            while (!lastRecordFound) {
                ConsumerRecords<String, String> records = consumer.poll(100);
                if (records.isEmpty()) {
                    count--;
                    if (count == 0) lastRecordFound = true;
                } else {
                    count = N;
                    for (ConsumerRecord<String, String> record : records) {
                        BinlogFileOffset current = handleRecord(record);
                        if (current != null) {
                            result = current;
                        }
                    }
                }
            }
            return Optional.ofNullable(result);
        } catch (Exception e) {
            logger.error("kafka consumer error: " + e);
            try {
                Thread.sleep(2000);
            } catch (InterruptedException ie) {
                logger.error(ie.getMessage(), ie);
            }
        }
    }
    return Optional.empty();
}
Example 20
Source File: KafkaTestUtils.java From kafka-junit with BSD 3-Clause "New" or "Revised" License
/**
 * This will consume all records from the partitions passed on the given topic.
 * @param <K> Type of key values.
 * @param <V> Type of message values.
 * @param topic Topic to consume from.
 * @param partitionIds Which partitions to consume from.
 * @param keyDeserializer How to deserialize the key values.
 * @param valueDeserializer How to deserialize the messages.
 * @return List of ConsumerRecords consumed.
 */
public <K, V> List<ConsumerRecord<K, V>> consumeAllRecordsFromTopic(
    final String topic,
    final Collection<Integer> partitionIds,
    final Class<? extends Deserializer<K>> keyDeserializer,
    final Class<? extends Deserializer<V>> valueDeserializer
) {
    // Create topic Partitions
    final List<TopicPartition> topicPartitions = partitionIds
        .stream()
        .map((partitionId) -> new TopicPartition(topic, partitionId))
        .collect(Collectors.toList());

    // Holds our results.
    final List<ConsumerRecord<K, V>> allRecords = new ArrayList<>();

    // Connect Consumer
    try (final KafkaConsumer<K, V> kafkaConsumer =
             getKafkaConsumer(keyDeserializer, valueDeserializer, new Properties())) {

        // Assign topic partitions & seek to head of them
        kafkaConsumer.assign(topicPartitions);
        kafkaConsumer.seekToBeginning(topicPartitions);

        // Pull records from kafka, keep polling until we get nothing back
        ConsumerRecords<K, V> records;
        final int maxEmptyLoops = 2;
        int loopsLeft = maxEmptyLoops;
        do {
            // Grab records from kafka
            records = kafkaConsumer.poll(2000L);
            logger.debug("Found {} records in kafka", records.count());

            // Add to our array list
            records.forEach(allRecords::add);

            // We want two full poll() calls that return empty results to break the loop.
            if (!records.isEmpty()) {
                // If we found records, reset our loop control variable.
                loopsLeft = maxEmptyLoops;
            } else {
                // Otherwise decrement the loop control variable.
                loopsLeft--;
            }
        } while (loopsLeft > 0);
    }

    // return all records
    return allRecords;
}