Java Code Examples for org.apache.kafka.clients.consumer.ConsumerRecord#value()
The following examples show how to use org.apache.kafka.clients.consumer.ConsumerRecord#value().
Each example is taken from an open-source project; the project and source file it comes from are listed above the code.
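As a baseline before the project-specific snippets, the sketch below shows the most common pattern: poll a KafkaConsumer and read each record's deserialized payload with ConsumerRecord#value(). The bootstrap address, group id, topic name, and String deserializers are illustrative assumptions, not taken from any of the projects below.

import java.time.Duration;
import java.util.Collections;
import java.util.Properties;

import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.common.serialization.StringDeserializer;

public class ConsumerRecordValueSketch {
    public static void main(String[] args) {
        Properties props = new Properties();
        props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092"); // assumed broker address
        props.put(ConsumerConfig.GROUP_ID_CONFIG, "example-group");           // assumed group id
        props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());
        props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());

        try (KafkaConsumer<String, String> consumer = new KafkaConsumer<>(props)) {
            consumer.subscribe(Collections.singletonList("example-topic")); // hypothetical topic name
            ConsumerRecords<String, String> records = consumer.poll(Duration.ofMillis(500));
            for (ConsumerRecord<String, String> record : records) {
                // value() returns the deserialized payload; it may be null for tombstone records
                System.out.printf("offset=%d key=%s value=%s%n",
                        record.offset(), record.key(), record.value());
            }
        }
    }
}

Note that value() can return null (for example, for tombstone records on a compacted topic), which is why several of the examples below null-check it before use.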
Example 1
Source File: KafkaChunkedCommandTransport.java From remoting-kafka-plugin with MIT License
@Override
public byte[] readBlock(Channel channel) throws IOException, ClassNotFoundException {
    byte[] data = null;
    consumer.subscribe(consumerTopics);
    while (true) {
        ConsumerRecords<String, byte[]> records = consumer.poll(pollTimeout);
        for (ConsumerRecord<String, byte[]> record : records) {
            if (record.key().equals(consumerKey)) {
                data = record.value();
            }
        }
        if (data != null) {
            return data;
        }
    }
}
Example 2
Source File: ParallelWebKafkaConsumer.java From kafka-webview with MIT License
private List<KafkaResult> consume(final KafkaConsumer kafkaConsumer) {
    final List<KafkaResult> kafkaResultList = new ArrayList<>();
    final ConsumerRecords<?, ?> consumerRecords = kafkaConsumer.poll(pollTimeoutDuration);

    logger.info("Consumed {} records", consumerRecords.count());
    for (final ConsumerRecord consumerRecord : consumerRecords) {
        // Get next record
        // Convert to KafkaResult.
        final KafkaResult kafkaResult = new KafkaResult(
            consumerRecord.partition(),
            consumerRecord.offset(),
            consumerRecord.timestamp(),
            consumerRecord.key(),
            consumerRecord.value()
        );

        // Add to list.
        kafkaResultList.add(kafkaResult);
    }

    // Commit offsets
    commit(kafkaConsumer);
    return kafkaResultList;
}
Example 3
Source File: ConsumerLease.java From nifi with Apache License 2.0
private void writeData(final ProcessSession session, ConsumerRecord<byte[], byte[]> record, final TopicPartition topicPartition) {
    FlowFile flowFile = session.create();
    final BundleTracker tracker = new BundleTracker(record, topicPartition, keyEncoding);
    tracker.incrementRecordCount(1);
    final byte[] value = record.value();
    if (value != null) {
        flowFile = session.write(flowFile, out -> {
            out.write(value);
        });
    }
    flowFile = session.putAllAttributes(flowFile, getAttributes(record));
    tracker.updateFlowFile(flowFile);
    populateAttributes(tracker);
    session.transfer(tracker.flowFile, REL_SUCCESS);
}
Example 4
Source File: KafkaITCase.java From flink with Apache License 2.0
@Override
public Long deserialize(ConsumerRecord<byte[], byte[]> record) throws IOException {
    cnt++;
    // Wrap the raw value bytes so the Flink TypeSerializer can read them.
    DataInputView in = new DataInputViewStreamWrapper(new ByteArrayInputStream(record.value()));
    Long e = ser.deserialize(in);
    return e;
}
Example 5
Source File: ConsumerLease.java From nifi with Apache License 2.0
private void handleParseFailure(final ConsumerRecord<byte[], byte[]> consumerRecord, final ProcessSession session,
        final Exception cause, final String message) {
    // If we are unable to parse the data, we need to transfer it to 'parse failure' relationship
    final Map<String, String> attributes = getAttributes(consumerRecord);
    attributes.put(KafkaProcessorUtils.KAFKA_OFFSET, String.valueOf(consumerRecord.offset()));
    attributes.put(KafkaProcessorUtils.KAFKA_TIMESTAMP, String.valueOf(consumerRecord.timestamp()));
    attributes.put(KafkaProcessorUtils.KAFKA_PARTITION, String.valueOf(consumerRecord.partition()));
    attributes.put(KafkaProcessorUtils.KAFKA_TOPIC, consumerRecord.topic());

    FlowFile failureFlowFile = session.create();

    final byte[] value = consumerRecord.value();
    if (value != null) {
        failureFlowFile = session.write(failureFlowFile, out -> out.write(value));
    }
    failureFlowFile = session.putAllAttributes(failureFlowFile, attributes);

    final String transitUri = KafkaProcessorUtils.buildTransitURI(securityProtocol, bootstrapServers, consumerRecord.topic());
    session.getProvenanceReporter().receive(failureFlowFile, transitUri);

    session.transfer(failureFlowFile, REL_PARSE_FAILURE);

    if (cause == null) {
        logger.error(message);
    } else {
        logger.error(message, cause);
    }

    session.adjustCounter("Parse Failures", 1, false);
}
Example 6
Source File: KafkaSource.java From kylin-on-parquet-v2 with Apache License 2.0
@Override
public String getMessageTemplate(StreamingSourceConfig streamingSourceConfig) {
    String template = null;
    KafkaConsumer<byte[], byte[]> consumer = null;
    try {
        String topicName = getTopicName(streamingSourceConfig.getProperties());
        Map<String, Object> config = getKafkaConf(streamingSourceConfig.getProperties());
        consumer = new KafkaConsumer<>(config);
        Set<TopicPartition> partitions = Sets.newHashSet(FluentIterable.from(consumer.partitionsFor(topicName))
                .transform(new Function<PartitionInfo, TopicPartition>() {
                    @Override
                    public TopicPartition apply(PartitionInfo input) {
                        return new TopicPartition(input.topic(), input.partition());
                    }
                }));
        consumer.assign(partitions);
        consumer.seekToBeginning(partitions);
        ConsumerRecords<byte[], byte[]> records = consumer.poll(500);
        if (records == null) {
            return null;
        }
        Iterator<ConsumerRecord<byte[], byte[]>> iterator = records.iterator();
        if (iterator == null || !iterator.hasNext()) {
            return null;
        }
        ConsumerRecord<byte[], byte[]> record = iterator.next();
        template = new String(record.value(), "UTF8");
    } catch (Exception e) {
        logger.error("error when fetch one record from kafka, stream:" + streamingSourceConfig.getName(), e);
    } finally {
        if (consumer != null) {
            consumer.close();
        }
    }
    return template;
}
Example 7
Source File: NewApiTopicConsumer.java From azeroth with Apache License 2.0
/**
 * @param record
 */
private void processConsumerRecords(final ConsumerRecord<String, Serializable> record) {
    final MessageHandler messageHandler = topicHandlers.get(record.topic());

    consumerContext.saveOffsetsBeforeProcessed(record.topic(), record.partition(), record.offset());
    // Handle payloads that are not wrapped in a DefaultMessage
    final DefaultMessage message = record.value() instanceof DefaultMessage ? (DefaultMessage) record.value()
            : new DefaultMessage((Serializable) record.value());
    // Phase 1 processing
    messageHandler.p1Process(message);
    // Phase 2 processing
    processExecutor.submit(new Runnable() {
        @Override
        public void run() {
            try {
                messageHandler.p2Process(message);
                // consumerContext.saveOffsetsAfterProcessed(record.topic(), record.partition(), record.offset());
            } catch (Exception e) {
                boolean processed = messageHandler.onProcessError(message);
                if (!processed) {
                    errorMessageProcessor.submit(message, messageHandler);
                }
                logger.error("[" + messageHandler.getClass().getSimpleName() + "] process Topic[" + record.topic() + "] error", e);
            }
        }
    });
}
Example 8
Source File: TypeInformationKeyValueSerializationSchema.java From flink with Apache License 2.0
@Override
public Tuple2<K, V> deserialize(ConsumerRecord<byte[], byte[]> record) throws Exception {
    K key = null;
    V value = null;

    if (record.key() != null) {
        inputDeserializer.setBuffer(record.key());
        key = keySerializer.deserialize(inputDeserializer);
    }
    if (record.value() != null) {
        inputDeserializer.setBuffer(record.value());
        value = valueSerializer.deserialize(inputDeserializer);
    }
    return new Tuple2<>(key, value);
}
Example 9
Source File: Kafka010ITCase.java From flink with Apache License 2.0
@Override
public Long deserialize(ConsumerRecord<byte[], byte[]> record) throws IOException {
    cnt++;
    DataInputView in = new DataInputViewStreamWrapper(new ByteArrayInputStream(record.value()));
    Long e = ser.deserialize(in);
    return e;
}
Example 10
Source File: OrderDetailsService.java From qcon-microservices with Apache License 2.0
private void startService(String configFile) throws IOException {
    startConsumer(configFile);
    startProducer(configFile);

    try {
        consumer.subscribe(singletonList(Schemas.Topics.ORDERS.name()));

        while (running) {
            ConsumerRecords<String, Order> records = consumer.poll(Duration.ofMillis(100));
            for (ConsumerRecord<String, Order> record : records) {
                Order order = record.value();
                if (order.getState() == OrderState.CREATED) {
                    // Validate the order (using validate())
                    OrderValidationResult validationResult = validate(order);

                    // create a ProducerRecord from the order and result (see record())
                    // then produce the result to Kafka using the existing producer
                    producer.send(record(order, validationResult));
                }
            }
        }
    } finally {
        close();
    }
}
Example 11
Source File: KafkaConsumerThread.java From flink with Apache License 2.0
/**
 *
 * @param records List of ConsumerRecords.
 * @return Total batch size in bytes, including key and value.
 */
private int getRecordBatchSize(ConsumerRecords<byte[], byte[]> records) {
    int recordBatchSizeBytes = 0;
    for (ConsumerRecord<byte[], byte[]> record : records) {
        // Null is an allowed value for the key
        if (record.key() != null) {
            recordBatchSizeBytes += record.key().length;
        }
        recordBatchSizeBytes += record.value().length;
    }
    return recordBatchSizeBytes;
}
Example 12
Source File: MessageInspector.java From Kafdrop with Apache License 2.0
private MessageVO createMessage(ConsumerRecord<byte[], byte[]> record, MessageDeserializer deserializer) {
    MessageVO vo = new MessageVO();
    vo.setTopic(record.topic());
    vo.setOffset(record.offset());
    vo.setPartition(record.partition());
    if (record.key() != null && record.key().length > 0) {
        vo.setKey(readString(record.key()));
    }
    if (record.value() != null && record.value().length > 0) {
        final String messageString;
        if (deserializer != null) {
            messageString = deserializer.deserializeMessage(ByteBuffer.wrap(record.value()));
        } else {
            messageString = readString(record.value());
        }
        vo.setMessage(messageString);
    }

    vo.setTimestamp(record.timestamp());
    vo.setTimestampType(record.timestampType().toString());

    StreamSupport.stream(record.headers().spliterator(), false)
        .forEachOrdered(header -> vo.addHeader(header.key(), new String(header.value(), StandardCharsets.UTF_8)));

    return vo;
}
Example 13
Source File: MappedMetricDataTimestampExtractor.java From adaptive-alerting with Apache License 2.0
@Override
public long extract(ConsumerRecord<Object, Object> record, long previousTimestamp) {
    val mappedMetricData = (MappedMetricData) record.value();
    if (mappedMetricData == null || mappedMetricData.getMetricData() == null) {
        // -1 skips the record. Don't log as it can fill up the logs.
        return -1L;
    }
    return mappedMetricData.getMetricData().getTimestamp() * 1000L;
}
Example 14
Source File: KafkaSubribleLister.java From ext-opensource-netty with Mozilla Public License 2.0
@KafkaListener(topics = {"mqtt-internal"})
public void consumer(ConsumerRecord<String, String> consumerRecord) {
    String kafkaMessage = consumerRecord.value();
    NettyLog.info("internalRecvice: " + kafkaMessage);
    if (kafkaMessage != null) {
        if (serverRecvice != null) {
            serverRecvice.processServerRecviceMesage(kafkaMessage);
        }
    }
}
Example 15
Source File: KafkaUnboundedReader.java From beam with Apache License 2.0
@Override
public boolean advance() throws IOException {
    /* Read first record (if any). we need to loop here because :
     *  - (a) some records initially need to be skipped if they are before consumedOffset
     *  - (b) if curBatch is empty, we want to fetch next batch and then advance.
     *  - (c) curBatch is an iterator of iterators. we interleave the records from each.
     *        curBatch.next() might return an empty iterator.
     */
    while (true) {
        if (curBatch.hasNext()) {
            PartitionState<K, V> pState = curBatch.next();

            if (!pState.recordIter.hasNext()) { // -- (c)
                pState.recordIter = Collections.emptyIterator(); // drop ref
                curBatch.remove();
                continue;
            }

            elementsRead.inc();
            elementsReadBySplit.inc();

            ConsumerRecord<byte[], byte[]> rawRecord = pState.recordIter.next();
            long expected = pState.nextOffset;
            long offset = rawRecord.offset();

            if (offset < expected) { // -- (a)
                // this can happen when compression is enabled in Kafka (seems to be fixed in 0.10)
                // should we check if the offset is way off from consumedOffset (say > 1M)?
                LOG.warn(
                    "{}: ignoring already consumed offset {} for {}",
                    this,
                    offset,
                    pState.topicPartition);
                continue;
            }

            long offsetGap = offset - expected; // could be > 0 when Kafka log compaction is enabled.

            if (curRecord == null) {
                LOG.info("{}: first record offset {}", name, offset);
                offsetGap = 0;
            }

            // Apply user deserializers. User deserializers might throw, which will be propagated up
            // and 'curRecord' remains unchanged. The runner should close this reader.
            // TODO: write records that can't be deserialized to a "dead-letter" additional output.
            KafkaRecord<K, V> record =
                new KafkaRecord<>(
                    rawRecord.topic(),
                    rawRecord.partition(),
                    rawRecord.offset(),
                    consumerSpEL.getRecordTimestamp(rawRecord),
                    consumerSpEL.getRecordTimestampType(rawRecord),
                    ConsumerSpEL.hasHeaders() ? rawRecord.headers() : null,
                    keyDeserializerInstance.deserialize(rawRecord.topic(), rawRecord.key()),
                    valueDeserializerInstance.deserialize(rawRecord.topic(), rawRecord.value()));

            curTimestamp =
                pState.timestampPolicy.getTimestampForRecord(pState.mkTimestampPolicyContext(), record);
            curRecord = record;

            int recordSize =
                (rawRecord.key() == null ? 0 : rawRecord.key().length)
                    + (rawRecord.value() == null ? 0 : rawRecord.value().length);
            pState.recordConsumed(offset, recordSize, offsetGap);
            bytesRead.inc(recordSize);
            bytesReadBySplit.inc(recordSize);
            return true;

        } else { // -- (b)
            nextBatch();

            if (!curBatch.hasNext()) {
                return false;
            }
        }
    }
}
Example 16
Source File: KafkaEvent.java From mewbase with MIT License
public KafkaEvent(ConsumerRecord<String, byte[]> rec) {
    eventNumber = rec.offset();
    epochMillis = rec.timestamp();
    eventBuf = rec.value();
    crc32 = EventUtils.checksum(eventBuf);
}
Example 17
Source File: UnprocessedEventEntity.java From integration-patterns with MIT License
public UnprocessedEventEntity(ConsumerRecord<String, String> consumerRecord) {
    this.key = consumerRecord.key();
    this.payload = consumerRecord.value();
    this.topic = consumerRecord.topic();
}
Example 18
Source File: KafkaUnboundedReader.java From DataflowTemplates with Apache License 2.0
@Override
public boolean advance() throws IOException {
    /* Read first record (if any). we need to loop here because :
     *  - (a) some records initially need to be skipped if they are before consumedOffset
     *  - (b) if curBatch is empty, we want to fetch next batch and then advance.
     *  - (c) curBatch is an iterator of iterators. we interleave the records from each.
     *        curBatch.next() might return an empty iterator.
     */
    while (true) {
        if (curBatch.hasNext()) {
            PartitionState<K, V> pState = curBatch.next();

            if (!pState.recordIter.hasNext()) { // -- (c)
                pState.recordIter = Collections.emptyIterator(); // drop ref
                curBatch.remove();
                continue;
            }

            elementsRead.inc();
            elementsReadBySplit.inc();

            ConsumerRecord<byte[], byte[]> rawRecord = pState.recordIter.next();
            long expected = pState.nextOffset;
            long offset = rawRecord.offset();

            if (offset < expected) { // -- (a)
                // this can happen when compression is enabled in Kafka (seems to be fixed in 0.10)
                // should we check if the offset is way off from consumedOffset (say > 1M)?
                LOG.warn(
                    "{}: ignoring already consumed offset {} for {}",
                    this,
                    offset,
                    pState.topicPartition);
                continue;
            }

            long offsetGap = offset - expected; // could be > 0 when Kafka log compaction is enabled.

            if (curRecord == null) {
                LOG.info("{}: first record offset {}", name, offset);
                offsetGap = 0;
            }

            // Apply user deserializers. User deserializers might throw, which will be propagated up
            // and 'curRecord' remains unchanged. The runner should close this reader.
            // TODO: write records that can't be deserialized to a "dead-letter" additional output.
            KafkaRecord<K, V> record =
                new KafkaRecord<>(
                    rawRecord.topic(),
                    rawRecord.partition(),
                    rawRecord.offset(),
                    consumerSpEL.getRecordTimestamp(rawRecord),
                    consumerSpEL.getRecordTimestampType(rawRecord),
                    ConsumerSpEL.hasHeaders ? rawRecord.headers() : null,
                    keyDeserializerInstance.deserialize(rawRecord.topic(), rawRecord.key()),
                    valueDeserializerInstance.deserialize(rawRecord.topic(), rawRecord.value()));

            curTimestamp =
                pState.timestampPolicy.getTimestampForRecord(pState.mkTimestampPolicyContext(), record);
            curRecord = record;

            int recordSize =
                (rawRecord.key() == null ? 0 : rawRecord.key().length)
                    + (rawRecord.value() == null ? 0 : rawRecord.value().length);
            pState.recordConsumed(offset, recordSize, offsetGap);
            bytesRead.inc(recordSize);
            bytesReadBySplit.inc(recordSize);
            return true;

        } else { // -- (b)
            nextBatch();

            if (!curBatch.hasNext()) {
                return false;
            }
        }
    }
}
Example 19
Source File: KafkaBytesSource.java From pulsar with Apache License 2.0
@Override
public byte[] extractValue(ConsumerRecord<String, byte[]> record) {
    return record.value();
}
Example 20
Source File: TestRepartitionWindowApp.java From samza with Apache License 2.0
@Test
public void testRepartitionedSessionWindowCounter() throws Exception {
    // create topics
    createTopic(INPUT_TOPIC, 3);
    createTopic(OUTPUT_TOPIC, 1);

    // produce messages to different partitions.
    ObjectMapper mapper = new ObjectMapper();
    PageView pv = new PageView("india", "5.com", "userId1");
    produceMessage(INPUT_TOPIC, 0, "userId1", mapper.writeValueAsString(pv));
    pv = new PageView("china", "4.com", "userId2");
    produceMessage(INPUT_TOPIC, 1, "userId2", mapper.writeValueAsString(pv));
    pv = new PageView("india", "1.com", "userId1");
    produceMessage(INPUT_TOPIC, 2, "userId1", mapper.writeValueAsString(pv));
    pv = new PageView("india", "2.com", "userId1");
    produceMessage(INPUT_TOPIC, 0, "userId1", mapper.writeValueAsString(pv));
    pv = new PageView("india", "3.com", "userId1");
    produceMessage(INPUT_TOPIC, 1, "userId1", mapper.writeValueAsString(pv));

    Map<String, String> configs = new HashMap<>();
    configs.put(JobCoordinatorConfig.JOB_COORDINATOR_FACTORY, "org.apache.samza.standalone.PassthroughJobCoordinatorFactory");
    configs.put(JobConfig.PROCESSOR_ID, "0");
    configs.put(TaskConfig.GROUPER_FACTORY, "org.apache.samza.container.grouper.task.GroupByContainerIdsFactory");

    // run the application
    runApplication(new RepartitionWindowApp(), APP_NAME, configs);

    // consume and validate result
    List<ConsumerRecord<String, String>> messages = consumeMessages(Collections.singletonList(OUTPUT_TOPIC), 2);
    Assert.assertEquals(messages.size(), 2);

    for (ConsumerRecord<String, String> message : messages) {
        String key = message.key();
        String value = message.value();
        // Assert that there are 4 messages for userId1 and 1 message for userId2.
        Assert.assertTrue(key.equals("userId1") || key.equals("userId2"));
        if ("userId1".equals(key)) {
            Assert.assertEquals(value, "4");
        } else {
            Assert.assertEquals(value, "1");
        }
    }
}