Java Code Examples for org.apache.kafka.clients.consumer.ConsumerRecords#iterator()
The following examples show how to use org.apache.kafka.clients.consumer.ConsumerRecords#iterator(). The source file, project, and license for each example are noted above it.
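Before the project-specific examples, here is a minimal, self-contained sketch of the pattern they all share: poll a consumer, obtain an iterator from the returned ConsumerRecords, and walk the records. The broker address, group id, and topic name are illustrative assumptions, and poll(Duration) requires a newer client than the deprecated poll(long) overload used by several of the examples below.

import java.time.Duration;
import java.util.Collections;
import java.util.Iterator;
import java.util.Properties;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;

public class ConsumerRecordsIteratorSketch {
    public static void main(String[] args) {
        Properties props = new Properties();
        props.setProperty("bootstrap.servers", "localhost:9092"); // assumed broker address
        props.setProperty("group.id", "example-group");           // assumed group id
        props.setProperty("key.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
        props.setProperty("value.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
        try (KafkaConsumer<String, String> consumer = new KafkaConsumer<>(props)) {
            consumer.subscribe(Collections.singletonList("example-topic")); // assumed topic
            ConsumerRecords<String, String> records = consumer.poll(Duration.ofSeconds(1));
            // iterator() yields every polled record across all assigned partitions
            Iterator<ConsumerRecord<String, String>> it = records.iterator();
            while (it.hasNext()) {
                ConsumerRecord<String, String> record = it.next();
                System.out.printf("partition=%d offset=%d key=%s value=%s%n",
                        record.partition(), record.offset(), record.key(), record.value());
            }
        }
    }
}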
Example 1
Source File: KafkaLegacyClientIT.java From apm-agent-java with Apache License 2.0
private void sendTwoRecordsAndConsumeReplies() {
    final StringBuilder callback = new StringBuilder();
    ProducerRecord<String, String> record1 = new ProducerRecord<>(REQUEST_TOPIC, 0, REQUEST_KEY, FIRST_MESSAGE_VALUE);
    ProducerRecord<String, String> record2 = new ProducerRecord<>(REQUEST_TOPIC, REQUEST_KEY, SECOND_MESSAGE_VALUE);
    producer.send(record1);
    producer.send(record2, (metadata, exception) -> callback.append("done"));
    if (testScenario != TestScenario.IGNORE_REQUEST_TOPIC) {
        await().atMost(2000, MILLISECONDS).until(() -> reporter.getSpans().size() == 2);
    }
    ConsumerRecords<String, String> replies = replyConsumer.poll(2000);
    assertThat(callback).isNotEmpty();
    assertThat(replies.count()).isEqualTo(2);
    Iterator<ConsumerRecord<String, String>> iterator = replies.iterator();
    assertThat(iterator.next().value()).isEqualTo(FIRST_MESSAGE_VALUE);
    assertThat(iterator.next().value()).isEqualTo(SECOND_MESSAGE_VALUE);
    // this is required in order to end transactions related to the record iteration
    assertThat(iterator.hasNext()).isFalse();
}
Example 2
Source File: KafkaLegacyBrokerIT.java From apm-agent-java with Apache License 2.0
private void sendTwoRecordsAndConsumeReplies() {
    final StringBuilder callback = new StringBuilder();
    ProducerRecord<String, String> record1 = new ProducerRecord<>(REQUEST_TOPIC, 0, REQUEST_KEY, FIRST_MESSAGE_VALUE);
    ProducerRecord<String, String> record2 = new ProducerRecord<>(REQUEST_TOPIC, REQUEST_KEY, SECOND_MESSAGE_VALUE);
    producer.send(record1);
    producer.send(record2, (metadata, exception) -> callback.append("done"));
    if (testScenario != TestScenario.IGNORE_REQUEST_TOPIC) {
        await().atMost(2000, MILLISECONDS).until(() -> reporter.getTransactions().size() == 2);
        int expectedSpans = (testScenario == TestScenario.NO_CONTEXT_PROPAGATION) ? 2 : 4;
        await().atMost(500, MILLISECONDS).until(() -> reporter.getSpans().size() == expectedSpans);
    }
    //noinspection deprecation - this poll overload is deprecated in newer clients, but enables testing of old ones
    ConsumerRecords<String, String> replies = replyConsumer.poll(2000);
    assertThat(callback).isNotEmpty();
    assertThat(replies.count()).isEqualTo(2);
    Iterator<ConsumerRecord<String, String>> iterator = replies.iterator();
    assertThat(iterator.next().value()).isEqualTo(FIRST_MESSAGE_VALUE);
    assertThat(iterator.next().value()).isEqualTo(SECOND_MESSAGE_VALUE);
    // this is required in order to end transactions related to the record iteration
    assertThat(iterator.hasNext()).isFalse();
}
Example 3
Source File: KafkaExportIT.java From rya with Apache License 2.0
private VisibilityBindingSet readLastResult(final String pcjId) throws Exception {
    requireNonNull(pcjId);

    // Read the results from the Kafka topic. The last one has the final aggregation result.
    VisibilityBindingSet result = null;

    try (final KafkaConsumer<String, VisibilityBindingSet> consumer = makeConsumer(pcjId)) {
        final ConsumerRecords<String, VisibilityBindingSet> records = consumer.poll(5000);
        final Iterator<ConsumerRecord<String, VisibilityBindingSet>> recordIterator = records.iterator();
        while (recordIterator.hasNext()) {
            result = recordIterator.next().value();
        }
    }

    return result;
}
Example 4
Source File: KafkaExportIT.java From rya with Apache License 2.0
private Set<VisibilityBindingSet> readGroupedResults(final String pcjId, final VariableOrder groupByVars) {
    requireNonNull(pcjId);

    // Read the results from the Kafka topic. The last one for each set of Group By values is an aggregation result.
    // The key in this map is a Binding Set containing only the group by variables.
    final Map<BindingSet, VisibilityBindingSet> results = new HashMap<>();

    try (final KafkaConsumer<String, VisibilityBindingSet> consumer = makeConsumer(pcjId)) {
        final ConsumerRecords<String, VisibilityBindingSet> records = consumer.poll(5000);
        final Iterator<ConsumerRecord<String, VisibilityBindingSet>> recordIterator = records.iterator();
        while (recordIterator.hasNext()) {
            final VisibilityBindingSet visBindingSet = recordIterator.next().value();
            final MapBindingSet key = new MapBindingSet();
            for (final String groupByBar : groupByVars) {
                key.addBinding(visBindingSet.getBinding(groupByBar));
            }
            results.put(key, visBindingSet);
        }
    }

    return Sets.newHashSet(results.values());
}
Example 5
Source File: KafkaQueue.java From NetDiscovery with Apache License 2.0
@Override
public Request poll(String spiderName) {
    // max.poll.records=1 forces the consumer to fetch a single record per poll
    ConsumerRecords<String, Request> records = consumer.poll(timeout);
    if (records != null && records.iterator() != null && records.count() > 0) {
        consumer.commitAsync();
        ConsumerRecord<String, Request> record = records.iterator().next();
        log.info("kafka consumer result count: {}, data: {}", records.count(), record);
        this.currentOffset = record.offset();
        return record.value();
    }
    return null;
}
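The comment in Example 5 refers to the consumer's max.poll.records setting. As a sketch of how such a consumer might be configured (the broker address, group id, and the deserializer for the custom Request type are assumptions), this caps each poll at a single record:

Properties props = new Properties();
props.setProperty("bootstrap.servers", "localhost:9092"); // assumed broker address
props.setProperty("group.id", "spider-queue");            // assumed group id
props.setProperty("max.poll.records", "1");               // at most one record per poll()
props.setProperty("key.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
// the example's Request values would need a matching custom deserializer (hypothetical class name):
props.setProperty("value.deserializer", "com.example.RequestDeserializer");
KafkaConsumer<String, Request> consumer = new KafkaConsumer<>(props);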
Example 6
Source File: ConsumerRecordsProcessorTest.java From li-apache-kafka-clients with BSD 2-Clause "Simplified" License
@Test
public void testCorrectness() {
    ConsumerRecordsProcessor<String, String> consumerRecordsProcessor = createConsumerRecordsProcessor();
    ConsumerRecords<String, String> processedRecords =
        consumerRecordsProcessor.process(getConsumerRecords()).consumerRecords();
    assertEquals(processedRecords.count(), 4, "There should be 4 records");
    Iterator<ConsumerRecord<String, String>> iter = processedRecords.iterator();

    ConsumerRecord<String, String> record = iter.next();
    assertEquals(record.offset(), 0, "Message offset should be 0");
    assertNull(record.headers().lastHeader(Constants.SAFE_OFFSET_HEADER));

    record = iter.next();
    assertEquals(record.offset(), 2, "Message offset should be 2");
    assertEquals(PrimitiveEncoderDecoder.decodeLong(record.headers().lastHeader(Constants.SAFE_OFFSET_HEADER).value(), 0), 1L);

    record = iter.next();
    assertEquals(record.offset(), 4, "Message offset should be 4");
    assertEquals(PrimitiveEncoderDecoder.decodeLong(record.headers().lastHeader(Constants.SAFE_OFFSET_HEADER).value(), 0), 1L);

    record = iter.next();
    assertEquals(record.offset(), 5, "Message offset should be 5");
    assertNull(record.headers().lastHeader(Constants.SAFE_OFFSET_HEADER));
}
Example 7
Source File: KafkaRyaSubGraphExportIT.java From rya with Apache License 2.0
private Set<RyaSubGraph> readAllResults(final String pcjId) throws Exception {
    requireNonNull(pcjId);

    // Read all of the results from the Kafka topic.
    final Set<RyaSubGraph> results = new HashSet<>();

    try (final KafkaConsumer<String, RyaSubGraph> consumer = makeRyaSubGraphConsumer(pcjId)) {
        final ConsumerRecords<String, RyaSubGraph> records = consumer.poll(5000);
        final Iterator<ConsumerRecord<String, RyaSubGraph>> recordIterator = records.iterator();
        while (recordIterator.hasNext()) {
            results.add(recordIterator.next().value());
        }
    }

    return results;
}
Example 8
Source File: AbstractInventoryCountTests.java From spring-cloud-stream-samples with Apache License 2.0
/**
 * Consume the actual events from the output topic.
 * This implementation uses a {@link Consumer}, assuming an (embedded) Kafka broker, but may be overridden.
 * @param expectedCount the expected number of messages; knowing it up front avoids a timeout delay if all is well
 * @return the consumed data
 */
protected Map<ProductKey, InventoryCountEvent> consumeActualInventoryCountEvents(int expectedCount) {
    Map<ProductKey, InventoryCountEvent> inventoryCountEvents = new LinkedHashMap<>();
    int receivedCount = 0;
    while (receivedCount < expectedCount) {
        ConsumerRecords<ProductKey, InventoryCountEvent> records = KafkaTestUtils.getRecords(consumer, 1000);
        if (records.isEmpty()) {
            logger.error("No more records received. Expected {} received {}.", expectedCount, receivedCount);
            break;
        }
        receivedCount += records.count();
        for (Iterator<ConsumerRecord<ProductKey, InventoryCountEvent>> it = records.iterator(); it.hasNext(); ) {
            ConsumerRecord<ProductKey, InventoryCountEvent> consumerRecord = it.next();
            logger.debug("consumed " + consumerRecord.key().getProductCode() + " = " + consumerRecord.value().getCount());
            inventoryCountEvents.put(consumerRecord.key(), consumerRecord.value());
        }
    }
    return inventoryCountEvents;
}
Example 9
Source File: ConsumeDataIterator.java From oryx with Apache License 2.0
@Override
protected KeyMessage<K, V> computeNext() {
    if (iterator == null || !iterator.hasNext()) {
        try {
            long timeout = MIN_POLL_MS;
            ConsumerRecords<K, V> records;
            while ((records = consumer.poll(timeout)).isEmpty()) {
                timeout = Math.min(MAX_POLL_MS, timeout * 2); // back off exponentially up to the max
            }
            iterator = records.iterator();
        } catch (Exception e) {
            consumer.close();
            return endOfData();
        }
    }
    ConsumerRecord<K, V> mm = iterator.next();
    return new KeyMessageImpl<>(mm.key(), mm.value());
}
Example 10
Source File: KafkaSinkTester.java From pulsar with Apache License 2.0
@Override
public void validateSinkResult(Map<String, String> kvs) {
    Iterator<Map.Entry<String, String>> kvIter = kvs.entrySet().iterator();
    while (kvIter.hasNext()) {
        ConsumerRecords<String, String> records = kafkaConsumer.poll(1000);
        log.info("Received {} records from kafka topic {}", records.count(), kafkaTopicName);
        if (records.isEmpty()) {
            continue;
        }
        Iterator<ConsumerRecord<String, String>> recordsIter = records.iterator();
        while (recordsIter.hasNext() && kvIter.hasNext()) {
            ConsumerRecord<String, String> consumerRecord = recordsIter.next();
            Map.Entry<String, String> expectedRecord = kvIter.next();
            assertEquals(expectedRecord.getKey(), consumerRecord.key());
            assertEquals(expectedRecord.getValue(), consumerRecord.value());
        }
    }
}
Example 11
Source File: KafkaEasyTransMsgConsumerImpl.java From EasyTransaction with Apache License 2.0
private synchronized void pollAndDispatchMessage() throws InterruptedException {
    // Consumer-related settings must not be modified while records are being processed.
    // Fetch the records that need processing.
    ConsumerRecords<String, byte[]> allRecords = consumer.poll(10000);

    // Wrap each message as a callable job and dispatch the jobs for processing.
    Iterator<ConsumerRecord<String, byte[]>> iterator = allRecords.iterator();
    List<MessageHandler> listJob = new LinkedList<>();
    while (iterator.hasNext()) {
        listJob.add(new MessageHandler(iterator.next()));
    }
    executeJobs(listJob);

    // All jobs completed successfully; commit the consumed offsets.
    consumer.commitAsync();
}
Example 12
Source File: DefaultWebKafkaConsumer.java From kafka-webview with MIT License
private List<KafkaResult> consume() {
    final List<KafkaResult> kafkaResultList = new ArrayList<>();
    final ConsumerRecords consumerRecords = kafkaConsumer.poll(pollTimeoutDuration);

    logger.info("Consumed {} records", consumerRecords.count());
    final Iterator<ConsumerRecord> recordIterator = consumerRecords.iterator();
    while (recordIterator.hasNext()) {
        // Get next record
        final ConsumerRecord consumerRecord = recordIterator.next();

        // Convert to KafkaResult.
        final KafkaResult kafkaResult = new KafkaResult(
            consumerRecord.partition(),
            consumerRecord.offset(),
            consumerRecord.timestamp(),
            consumerRecord.key(),
            consumerRecord.value()
        );

        // Add to list.
        kafkaResultList.add(kafkaResult);
    }

    // Commit offsets
    commit();
    return kafkaResultList;
}
Example 13
Source File: KafkaConsumerThread.java From incubator-samoa with Apache License 2.0
private List<byte[]> getMessagesBytes(ConsumerRecords<String, byte[]> poll) {
    Iterator<ConsumerRecord<String, byte[]>> iterator = poll.iterator();
    List<byte[]> ret = new ArrayList<>();
    while (iterator.hasNext()) {
        ret.add(iterator.next().value());
    }
    return ret;
}
Example 14
Source File: ToUpperCaseProcessorIntTests.java From spring-cloud-stream-samples with Apache License 2.0
@Test
void testMessagesOverKafka() {
    this.template.send(TEST_TOPIC_IN, "test".getBytes());

    Consumer<byte[], String> consumer = this.consumerFactory.createConsumer();
    embeddedKafkaBroker.consumeFromAnEmbeddedTopic(consumer, TEST_TOPIC_OUT);

    ConsumerRecords<byte[], String> replies = KafkaTestUtils.getRecords(consumer);
    assertThat(replies.count()).isEqualTo(1);

    Iterator<ConsumerRecord<byte[], String>> iterator = replies.iterator();
    assertThat(iterator.next().value()).isEqualTo("TEST");
}
Example 15
Source File: KafkaUtilsTest.java From incubator-samoa with Apache License 2.0
/**
 * Test of sendKafkaMessage method, of class KafkaUtils.
 *
 * @throws java.lang.InterruptedException
 */
@Test
public void testSendKafkaMessage() throws InterruptedException {
    logger.log(Level.INFO, "sendKafkaMessage");

    logger.log(Level.INFO, "Initialising producer");
    KafkaUtils instance = new KafkaUtils(TestUtilsForKafka.getConsumerProperties(BROKERHOST, BROKERPORT),
        TestUtilsForKafka.getProducerProperties("rcv-test", BROKERHOST, BROKERPORT), CONSUMER_TIMEOUT);
    instance.initializeProducer();

    logger.log(Level.INFO, "Initialising consumer");
    KafkaConsumer<String, byte[]> consumer;
    consumer = new KafkaConsumer<>(TestUtilsForKafka.getConsumerProperties(BROKERHOST, BROKERPORT));
    consumer.subscribe(Arrays.asList(TOPIC_S));

    logger.log(Level.INFO, "Produce data");
    List<byte[]> sent = new ArrayList<>();
    Random r = new Random();
    InstancesHeader header = TestUtilsForKafka.generateHeader(10);
    Gson gson = new Gson();
    for (int i = 0; i < NUM_INSTANCES; i++) {
        byte[] val = gson.toJson(TestUtilsForKafka.getData(r, 10, header)).getBytes();
        sent.add(val);
        instance.sendKafkaMessage(TOPIC_S, val);
    }
    // wait for Kafka a bit :)
    Thread.sleep(2 * CONSUMER_TIMEOUT);

    logger.log(Level.INFO, "Get results from Kafka");
    List<byte[]> consumed = new ArrayList<>();
    while (consumed.size() != sent.size()) {
        ConsumerRecords<String, byte[]> records = consumer.poll(CONSUMER_TIMEOUT);
        Iterator<ConsumerRecord<String, byte[]>> it = records.iterator();
        while (it.hasNext()) {
            consumed.add(it.next().value());
        }
    }
    consumer.close();

    assertArrayEquals(sent.toArray(), consumed.toArray());
}
Example 16
Source File: KafkaExportITBase.java From rya with Apache License 2.0
/**
 * Test kafka without rya code to make sure kafka works in this environment.
 * If this test fails then it's a testing environment issue, not with Rya.
 * Source: https://github.com/asmaier/mini-kafka
 */
@Test
public void embeddedKafkaTest() throws Exception {
    // create topic
    final String topic = "testTopic";
    AdminUtils.createTopic(zkUtils, topic, 1, 1, new Properties(), RackAwareMode.Disabled$.MODULE$);

    // setup producer
    final Properties producerProps = new Properties();
    producerProps.setProperty("bootstrap.servers", BROKERHOST + ":" + BROKERPORT);
    producerProps.setProperty("key.serializer", "org.apache.kafka.common.serialization.IntegerSerializer");
    producerProps.setProperty("value.serializer", "org.apache.kafka.common.serialization.ByteArraySerializer");
    final KafkaProducer<Integer, byte[]> producer = new KafkaProducer<>(producerProps);

    // setup consumer
    final Properties consumerProps = new Properties();
    consumerProps.setProperty("bootstrap.servers", BROKERHOST + ":" + BROKERPORT);
    consumerProps.setProperty("group.id", "group0");
    consumerProps.setProperty("client.id", "consumer0");
    consumerProps.setProperty("key.deserializer", "org.apache.kafka.common.serialization.IntegerDeserializer");
    consumerProps.setProperty("value.deserializer", "org.apache.kafka.common.serialization.ByteArrayDeserializer");

    // to make sure the consumer starts from the beginning of the topic
    consumerProps.put("auto.offset.reset", "earliest");

    final KafkaConsumer<Integer, byte[]> consumer = new KafkaConsumer<>(consumerProps);
    consumer.subscribe(Arrays.asList(topic));

    // send message
    final ProducerRecord<Integer, byte[]> data = new ProducerRecord<>(topic, 42, "test-message".getBytes(StandardCharsets.UTF_8));
    producer.send(data);
    producer.close();

    // starting consumer
    final ConsumerRecords<Integer, byte[]> records = consumer.poll(3000);
    assertEquals(1, records.count());
    final Iterator<ConsumerRecord<Integer, byte[]>> recordIterator = records.iterator();
    final ConsumerRecord<Integer, byte[]> record = recordIterator.next();
    assertEquals(42, (int) record.key());
    assertEquals("test-message", new String(record.value(), StandardCharsets.UTF_8));
    consumer.close();
}
Example 17
Source File: KafkaConsumerManager.java From vertx-kafka-service with Apache License 2.0
private void read() {
    while (!consumer.subscription().isEmpty()) {
        final ConsumerRecords<String, String> records = consumer.poll(60000);
        if (shutdownRequested.get()) {
            executeStopConsumer();
        }
        final Iterator<ConsumerRecord<String, String>> iterator = records.iterator();
        while (iterator.hasNext()) {
            if (shutdownRequested.get()) {
                executeStopConsumer();
                return;
            }
            rateLimiter.ifPresent(limiter -> limiter.acquire());
            final PrometheusMetrics.InProgressMessage inProgressMessage = prometheusMetrics.messageStarted();
            final int phase = phaser.register();
            lastPhase.set(phase);
            final ConsumerRecord<String, String> msg = iterator.next();
            final long offset = msg.offset();
            final long partition = msg.partition();
            unacknowledgedOffsets.add(offset);
            lastReadOffset.set(offset);
            lastCommittedOffset.compareAndSet(0, offset);
            currentPartition.compareAndSet(-1, partition);

            handle(msg.value(), partition, offset, configuration.getMaxRetries(), configuration.getInitialRetryDelaySeconds(), inProgressMessage);

            if (unacknowledgedOffsets.size() >= configuration.getMaxUnacknowledged()
                    || partititionChanged(partition)
                    || tooManyUncommittedOffsets(offset)
                    || commitTimeoutReached()) {
                LOG.info("{}: Got {} unacknowledged messages, waiting for ACKs in order to commit", configuration.getKafkaTopic(), unacknowledgedOffsets.size());
                if (!waitForAcks(phase)) {
                    return;
                }
                commitOffsetsIfAllAcknowledged(offset);
                LOG.info("{}: Continuing message processing on partition {}", configuration.getKafkaTopic(), currentPartition.get());
            }
        }
    }
    LOG.info("{}: ConsumerManager:read exited loop, consuming of messages has ended.", configuration.getKafkaTopic());
}
Example 18
Source File: KafkaDestinationProcessorTest.java From incubator-samoa with Apache License 2.0
@Test
public void testSendingData() throws InterruptedException, ExecutionException, TimeoutException {
    final Logger logger = Logger.getLogger(KafkaDestinationProcessorTest.class.getName());
    Properties props = TestUtilsForKafka.getProducerProperties(BROKERHOST, BROKERPORT);
    props.setProperty("auto.offset.reset", "earliest");
    KafkaDestinationProcessor kdp = new KafkaDestinationProcessor(props, TOPIC, new OosTestSerializer());
    kdp.onCreate(1);

    final int[] i = {0};

    // prepare new thread for data receiving
    Thread th = new Thread(new Runnable() {
        @Override
        public void run() {
            KafkaConsumer<String, byte[]> consumer = new KafkaConsumer<>(TestUtilsForKafka.getConsumerProperties(BROKERHOST, BROKERPORT));
            consumer.subscribe(Arrays.asList(TOPIC));
            while (i[0] < NUM_INSTANCES) {
                try {
                    ConsumerRecords<String, byte[]> cr = consumer.poll(CONSUMER_TIMEOUT);
                    Iterator<ConsumerRecord<String, byte[]>> it = cr.iterator();
                    while (it.hasNext()) {
                        ConsumerRecord<String, byte[]> record = it.next();
                        i[0]++;
                    }
                } catch (Exception ex) {
                    Logger.getLogger(KafkaDestinationProcessorTest.class.getName()).log(Level.SEVERE, null, ex);
                }
            }
            consumer.close();
        }
    });
    th.start();

    int z = 0;
    Random r = new Random();
    InstancesHeader header = TestUtilsForKafka.generateHeader(10);
    for (z = 0; z < NUM_INSTANCES; z++) {
        InstanceContentEvent event = TestUtilsForKafka.getData(r, 10, header);
        kdp.process(event);
        // logger.log(Level.INFO, "{0} {1}", new Object[]{"Sent item with id: ", z});
    }

    // wait for all instances to be read
    Thread.sleep(2 * CONSUMER_TIMEOUT);

    assertEquals("Number of sent and received instances", z, i[0]);
}
Example 19
Source File: RecordFilterInterceptorTest.java From kafka-webview with MIT License
/**
 * Test that filters can filter messages.
 */
@Test
public void testFilterMessages() {
    final int totalRecords = 5;

    // Create mock Filters
    final RecordFilter mockFilter1 = mock(RecordFilter.class);
    final RecordFilter mockFilter2 = mock(RecordFilter.class);
    when(mockFilter1.includeRecord(eq("MyTopic"), eq(0), anyLong(), any(), any()))
        .thenReturn(true, false, true, true, true);
    when(mockFilter2.includeRecord(eq("MyTopic"), eq(0), anyLong(), any(), any()))
        .thenReturn(true, true, false, true);

    final RecordFilterDefinition recordFilterDefinition1 = new RecordFilterDefinition(mockFilter1, new HashMap<>());
    final RecordFilterDefinition recordFilterDefinition2 = new RecordFilterDefinition(mockFilter2, new HashMap<>());

    // Create ConsumerConfigs
    final Map<String, Object> consumerConfigs = new HashMap<>();
    consumerConfigs.put(RecordFilterInterceptor.CONFIG_KEY, Lists.newArrayList(recordFilterDefinition1, recordFilterDefinition2));

    // Create interceptor.
    final RecordFilterInterceptor interceptor = new RecordFilterInterceptor();

    // Call configure
    interceptor.configure(consumerConfigs);

    // Create ConsumerRecords
    final ConsumerRecords consumerRecords = createConsumerRecords(totalRecords);

    // Pass through interceptor
    final ConsumerRecords results = interceptor.onConsume(consumerRecords);

    // Validate we got the expected results
    assertEquals("Should have 3 records", totalRecords - 2, results.count());
    for (Iterator<ConsumerRecord> it = results.iterator(); it.hasNext(); ) {
        final ConsumerRecord consumerRecord = it.next();
        assertNotEquals("Should not have offsets 1 and 3", 1, consumerRecord.offset());
        assertNotEquals("Should not have offsets 1 and 3", 3, consumerRecord.offset());
    }

    // Verify mocks
    verify(mockFilter1, times(totalRecords)).includeRecord(eq("MyTopic"), eq(0), anyLong(), any(), any());
    verify(mockFilter2, times(totalRecords - 1)).includeRecord(eq("MyTopic"), eq(0), anyLong(), any(), any());
}
Example 20
Source File: RecordFilterInterceptor.java From kafka-webview with MIT License
@Override
public ConsumerRecords onConsume(final ConsumerRecords records) {
    final Map<TopicPartition, List<ConsumerRecord>> filteredRecords = new HashMap<>();

    // Iterate through the records
    final Iterator<ConsumerRecord> recordIterator = records.iterator();
    while (recordIterator.hasNext()) {
        final ConsumerRecord record = recordIterator.next();

        boolean result = true;

        // Iterate through the filters
        for (final RecordFilterDefinition recordFilterDefinition : recordFilterDefinitions) {
            // Pass the record through the filter
            result = recordFilterDefinition.getRecordFilter().includeRecord(
                record.topic(), record.partition(), record.offset(), record.key(), record.value()
            );

            // If the filter rejected the record, stop evaluating further filters
            if (!result) {
                break;
            }
        }

        // If every filter returned true, include the record in the results
        if (result) {
            final TopicPartition topicPartition = new TopicPartition(record.topic(), record.partition());
            filteredRecords.putIfAbsent(topicPartition, new ArrayList<>());
            filteredRecords.get(topicPartition).add(record);
        }
    }

    // return filtered results
    return new ConsumerRecords(filteredRecords);
}