Java Code Examples for org.apache.kafka.clients.consumer.ConsumerRecord#partition()
The following examples show how to use org.apache.kafka.clients.consumer.ConsumerRecord#partition().
Each example is taken from an open-source project; the source file, project, and license are noted above each snippet.
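Before the project examples, here is a minimal, self-contained sketch of the typical pattern: read records in a poll loop and use record.partition(), together with record.topic() and record.offset(), to identify where each record came from. The broker address, group id, and topic name below are placeholders rather than values taken from any of the projects on this page, and the sketch assumes a kafka-clients version new enough to support poll(Duration).

import java.time.Duration;
import java.util.Collections;
import java.util.Properties;

import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.common.serialization.StringDeserializer;

public class PartitionLoggingConsumer {

    public static void main(String[] args) {
        Properties props = new Properties();
        props.put("bootstrap.servers", "localhost:9092");   // placeholder broker address
        props.put("group.id", "partition-logging-example"); // placeholder group id
        props.put("key.deserializer", StringDeserializer.class.getName());
        props.put("value.deserializer", StringDeserializer.class.getName());

        try (KafkaConsumer<String, String> consumer = new KafkaConsumer<>(props)) {
            consumer.subscribe(Collections.singletonList("example-topic")); // placeholder topic

            ConsumerRecords<String, String> records = consumer.poll(Duration.ofSeconds(1));
            for (ConsumerRecord<String, String> record : records) {
                // partition() returns the partition this record was read from;
                // together with topic() and offset() it pinpoints the record's position.
                System.out.printf("topic=%s partition=%d offset=%d key=%s%n",
                        record.topic(), record.partition(), record.offset(), record.key());
            }
        }
    }
}

The partition number is only meaningful in combination with the topic, which is why most of the examples below pair record.partition() with record.topic() and record.offset().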
Example 1
Source File: NewApiTopicConsumer.java From azeroth with Apache License 2.0
@Override
public Boolean call() {
    logger.debug("Number of records received : {}", records.count());
    try {
        for (final ConsumerRecord<String, Serializable> record : records) {
            TopicPartition tp = new TopicPartition(record.topic(), record.partition());
            logger.info("Record received topicPartition : {}, offset : {}", tp, record.offset());
            partitionToUncommittedOffsetMap.put(tp, record.offset());
            processConsumerRecords(record);
        }
    } catch (Exception e) {
        logger.error("Error while consuming", e);
    }
    return true;
}
Example 2
Source File: ParallelWebKafkaConsumer.java From kafka-webview with MIT License
private List<KafkaResult> consume(final KafkaConsumer kafkaConsumer) {
    final List<KafkaResult> kafkaResultList = new ArrayList<>();
    final ConsumerRecords<?, ?> consumerRecords = kafkaConsumer.poll(pollTimeoutDuration);

    logger.info("Consumed {} records", consumerRecords.count());
    for (final ConsumerRecord consumerRecord : consumerRecords) {
        // Get next record
        // Convert to KafkaResult.
        final KafkaResult kafkaResult = new KafkaResult(
            consumerRecord.partition(),
            consumerRecord.offset(),
            consumerRecord.timestamp(),
            consumerRecord.key(),
            consumerRecord.value()
        );

        // Add to list.
        kafkaResultList.add(kafkaResult);
    }
    // Commit offsets
    commit(kafkaConsumer);
    return kafkaResultList;
}
Example 3
Source File: Messages.java From ja-micro with Apache License 2.0
static Message<? extends com.google.protobuf.Message> fromKafka(com.google.protobuf.Message protoMessage,
        Envelope envelope, ConsumerRecord<String, byte[]> record) {
    boolean wasReceived = true;

    Topic topic = new Topic(record.topic());
    String partitioningKey = record.key();
    int partitionId = record.partition();
    long offset = record.offset();

    String messageId = envelope.getMessageId();
    String correlationId = envelope.getCorrelationId();
    MessageType type = MessageType.of(protoMessage);
    String requestCorrelationId = envelope.getRequestCorrelationId();
    Topic replyTo = new Topic(envelope.getReplyTo());

    Metadata meta = new Metadata(wasReceived, topic, partitioningKey, partitionId, offset,
            messageId, correlationId, requestCorrelationId, replyTo, type);

    return new Message<>(protoMessage, meta);
}
Example 4
Source File: AbstractKafkaInputOperator.java From attic-apex-malhar with Apache License 2.0
@Override
public void emitTuples() {
    int count = consumerWrapper.messageSize();
    if (maxTuplesPerWindow > 0) {
        count = Math.min(count, maxTuplesPerWindow - emitCount);
    }
    for (int i = 0; i < count; i++) {
        Pair<String, ConsumerRecord<byte[], byte[]>> tuple = consumerWrapper.pollMessage();
        ConsumerRecord<byte[], byte[]> msg = tuple.getRight();
        emitTuple(tuple.getLeft(), msg);
        AbstractKafkaPartitioner.PartitionMeta pm =
            new AbstractKafkaPartitioner.PartitionMeta(tuple.getLeft(), msg.topic(), msg.partition());
        offsetTrack.put(pm, msg.offset() + 1);
        if (isIdempotent() && !windowStartOffset.containsKey(pm)) {
            windowStartOffset.put(pm, msg.offset());
        }
    }
    emitCount += count;
    processConsumerError();
}
Example 5
Source File: KafkaEasyTransMsgConsumerImpl.java From EasyTransaction with Apache License 2.0
private void reconsumeLater(ConsumerRecord<String, byte[]> consumeRecord)
        throws InterruptedException, ExecutionException {

    // add all header to headList except RETRY_COUNT
    Headers headers = consumeRecord.headers();
    List<Header> headerList = new ArrayList<Header>(8);
    Iterator<Header> iterator = headers.iterator();
    Integer retryCount = -1;
    boolean hasOrignalHeader = false;
    while (iterator.hasNext()) {
        Header next = iterator.next();
        if (next.key().equals(RETRY_COUNT_KEY)) {
            retryCount = serializer.deserialize(next.value());
            continue;
        }
        if (next.key().equals(ORGINAL_TOPIC)) {
            hasOrignalHeader = true;
        }
        headerList.add(next);
    }

    // add RETRY_COUNT to header
    retryCount++;
    headerList.add(new RecordHeader(RETRY_COUNT_KEY, serializer.serialization(retryCount)));

    if (!hasOrignalHeader) {
        headerList.add(new RecordHeader(ORGINAL_TOPIC, serializer.serialization(consumeRecord.topic())));
    }

    // send message to corresponding queue according to retry times
    String retryTopic = calcRetryTopic(consumeRecord.topic(), retryCount);
    ProducerRecord<String, byte[]> record = new ProducerRecord<>(retryTopic,
            consumeRecord.partition() % retryQueuePartitionCount.get(retryTopic), null,
            consumeRecord.key(), consumeRecord.value(), headerList);
    Future<RecordMetadata> publishKafkaMessage = retryQueueMsgProducer.publishKafkaMessage(record);
    publishKafkaMessage.get();
}
Example 6
Source File: DefaultMessageTransformer.java From uReplicator with Apache License 2.0
@Override
public ProducerRecord process(ConsumerRecord record) {
    String topic = topicMapping.getOrDefault(record.topic(), record.topic());
    int partitionCount = 0;
    if (topicPartitionCountObserver != null) {
        partitionCount = topicPartitionCountObserver.getPartitionCount(topic);
    }
    Integer partition =
        partitionCount > 0 && record.partition() >= 0 ? record.partition() % partitionCount : null;
    Long timpstamp = record.timestamp() <= 0 ? null : record.timestamp();
    return new ProducerRecord(topic, partition, timpstamp, record.key(), record.value(), record.headers());
}
Example 7
Source File: MonitorConsumer.java From kafka-monitor with Apache License 2.0
public MonitorConsumerRecord receive() {
    if (recordIterator == null || !recordIterator.hasNext())
        recordIterator = consumer.poll(Long.MAX_VALUE).iterator();

    ConsumerRecord<String, String> record = recordIterator.next();
    return new MonitorConsumerRecord(record.topic(), record.partition(), record.offset(),
            record.key(), record.value());
}
Example 8
Source File: KafkaInput.java From envelope with Apache License 2.0
@Override
public Row call(ConsumerRecord record) {
    return new RowWithSchema(
        getProvidingSchema(),
        record.key(),
        record.value(),
        record.timestamp(),
        record.topic(),
        record.partition(),
        record.offset());
}
Example 9
Source File: SubscriptionManager.java From kafka-pubsub-emulator with Apache License 2.0
/**
 * Fills the {@code returnedMessages} List with up to {@code maxMessages} Message objects by
 * retrieving ConsumerRecords from the head of the buffered queue.
 */
private void fillFromBuffer(List<PubsubMessage> returnedMessages, int maxMessages) {
    ConsumerRecord<String, ByteBuffer> record;
    int dequeued = 0;
    String messageId;
    while (returnedMessages.size() < maxMessages && !buffer.isEmpty()) {
        try {
            record = buffer.remove();
            dequeued++;
            messageId = record.partition() + "-" + record.offset();
            queueSizeBytes.addAndGet(-record.serializedValueSize());
            returnedMessages.add(
                PubsubMessage.newBuilder()
                    .putAllAttributes(buildAttributesMap(record.headers()))
                    .setData(ByteString.copyFrom(record.value()))
                    .setMessageId(messageId)
                    .setPublishTime(
                        Timestamp.newBuilder()
                            .setSeconds(record.timestamp() / 1000)
                            .setNanos((int) ((record.timestamp() % 1000) * 1000000))
                            .build())
                    .build());
        } catch (NoSuchElementException e) {
            break;
        }
    }
    logger.atFine().log("Dequeued %d messages from buffer", dequeued);
}
Example 10
Source File: NewConsumer.java From kafka-monitor with Apache License 2.0
@Override
public BaseConsumerRecord receive() {
    if (_recordIter == null || !_recordIter.hasNext()) {
        _recordIter = _consumer.poll(Duration.ofMillis(Long.MAX_VALUE)).iterator();
    }

    ConsumerRecord<String, String> record = _recordIter.next();
    return new BaseConsumerRecord(record.topic(), record.partition(), record.offset(),
            record.key(), record.value());
}
Example 11
Source File: KafkaRowConverterImpl.java From calcite with Apache License 2.0
/**
 * Parse and reformat Kafka message from consumer, to align with row schema
 * defined as {@link #rowDataType(String)}.
 *
 * @param message the raw Kafka message record
 * @return fields in the row
 */
@Override
public Object[] toRow(final ConsumerRecord<byte[], byte[]> message) {
    Object[] fields = new Object[5];
    fields[0] = message.partition();
    fields[1] = message.timestamp();
    fields[2] = message.offset();
    fields[3] = message.key();
    fields[4] = message.value();

    return fields;
}
Example 12
Source File: EventuateKafkaAggregateSubscriptions.java From light-eventuate-4j with Apache License 2.0
private SerializedEvent toSerializedEvent(ConsumerRecord<String, String> record) {
    PublishedEvent pe = JSonMapper.fromJson(record.value(), PublishedEvent.class);
    return new SerializedEvent(
        Int128.fromString(pe.getId()),
        pe.getEntityId(),
        pe.getEntityType(),
        pe.getEventData(),
        pe.getEventType(),
        record.partition(),
        record.offset(),
        EtopEventContext.make(pe.getId(), record.topic(), record.partition(), record.offset()),
        pe.getMetadata());
}
Example 13
Source File: KafkaRowConverterTest.java From calcite with Apache License 2.0
/**
 * Parse and reformat Kafka message from consumer, to fit with row schema
 * defined as {@link #rowDataType(String)}.
 *
 * @param message the raw Kafka message record
 * @return fields in the row
 */
@Override
public Object[] toRow(final ConsumerRecord<String, String> message) {
    Object[] fields = new Object[3];
    fields[0] = message.topic();
    fields[1] = message.partition();
    fields[2] = message.timestampType().name;

    return fields;
}
Example 14
Source File: KafkaUnboundedReader.java From DataflowTemplates with Apache License 2.0
@Override
public boolean advance() throws IOException {
    /* Read first record (if any). we need to loop here because :
     *  - (a) some records initially need to be skipped if they are before consumedOffset
     *  - (b) if curBatch is empty, we want to fetch next batch and then advance.
     *  - (c) curBatch is an iterator of iterators. we interleave the records from each.
     *        curBatch.next() might return an empty iterator.
     */
    while (true) {
        if (curBatch.hasNext()) {
            PartitionState<K, V> pState = curBatch.next();

            if (!pState.recordIter.hasNext()) { // -- (c)
                pState.recordIter = Collections.emptyIterator(); // drop ref
                curBatch.remove();
                continue;
            }

            elementsRead.inc();
            elementsReadBySplit.inc();

            ConsumerRecord<byte[], byte[]> rawRecord = pState.recordIter.next();
            long expected = pState.nextOffset;
            long offset = rawRecord.offset();

            if (offset < expected) { // -- (a)
                // this can happen when compression is enabled in Kafka (seems to be fixed in 0.10)
                // should we check if the offset is way off from consumedOffset (say > 1M)?
                LOG.warn(
                    "{}: ignoring already consumed offset {} for {}",
                    this,
                    offset,
                    pState.topicPartition);
                continue;
            }

            long offsetGap = offset - expected; // could be > 0 when Kafka log compaction is enabled.

            if (curRecord == null) {
                LOG.info("{}: first record offset {}", name, offset);
                offsetGap = 0;
            }

            // Apply user deserializers. User deserializers might throw, which will be propagated up
            // and 'curRecord' remains unchanged. The runner should close this reader.
            // TODO: write records that can't be deserialized to a "dead-letter" additional output.
            KafkaRecord<K, V> record =
                new KafkaRecord<>(
                    rawRecord.topic(),
                    rawRecord.partition(),
                    rawRecord.offset(),
                    consumerSpEL.getRecordTimestamp(rawRecord),
                    consumerSpEL.getRecordTimestampType(rawRecord),
                    ConsumerSpEL.hasHeaders ? rawRecord.headers() : null,
                    keyDeserializerInstance.deserialize(rawRecord.topic(), rawRecord.key()),
                    valueDeserializerInstance.deserialize(rawRecord.topic(), rawRecord.value()));

            curTimestamp =
                pState.timestampPolicy.getTimestampForRecord(pState.mkTimestampPolicyContext(), record);
            curRecord = record;

            int recordSize =
                (rawRecord.key() == null ? 0 : rawRecord.key().length)
                    + (rawRecord.value() == null ? 0 : rawRecord.value().length);
            pState.recordConsumed(offset, recordSize, offsetGap);
            bytesRead.inc(recordSize);
            bytesReadBySplit.inc(recordSize);
            return true;

        } else { // -- (b)
            nextBatch();

            if (!curBatch.hasNext()) {
                return false;
            }
        }
    }
}
Example 15
Source File: DBusConsumerRecord.java From DBus with Apache License 2.0
public DBusConsumerRecord(ConsumerRecord<K, V> record) {
    this(record.topic(), record.partition(), record.offset(), record.timestamp(), record.timestampType(),
            record.checksum(), record.serializedKeySize(), record.serializedValueSize(),
            record.key(), record.value());
}
Example 16
Source File: KafkaConsumer09.java From datacollector with Apache License 2.0
@Override
MessageAndOffset getMessageAndOffset(ConsumerRecord message, boolean isEnabled) {
    return new MessageAndOffset(message.key(), message.value(), message.offset(), message.partition());
}
Example 17
Source File: SocketKafkaConsumer.java From kafka-webview with MIT License
@Override
public void run() {
    // Rename thread.
    Thread.currentThread().setName("WebSocket Consumer: " + clientConfig.getConsumerId());
    logger.info("Starting socket consumer for {}", clientConfig.getConsumerId());

    // Determine where to start from.
    initializeStartingPosition(clientConfig.getStartingPosition());

    do {
        // Start trying to consume messages from kafka
        final ConsumerRecords<?, ?> consumerRecords = kafkaConsumer.poll(pollTimeoutDuration);

        // If no records found
        if (consumerRecords.isEmpty()) {
            // Sleep for a bit
            sleep(POLL_TIMEOUT_MS);

            // Skip to next iteration of loop.
            continue;
        }

        // Push messages onto output queue
        for (final ConsumerRecord consumerRecord : consumerRecords) {
            // Translate record
            final KafkaResult kafkaResult = new KafkaResult(
                consumerRecord.partition(),
                consumerRecord.offset(),
                consumerRecord.timestamp(),
                consumerRecord.key(),
                consumerRecord.value()
            );

            // Add to the queue, this operation may block, effectively preventing the consumer from
            // consuming unbounded-ly.
            try {
                outputQueue.put(kafkaResult);
            } catch (final InterruptedException interruptedException) {
                // InterruptedException means we should shut down.
                requestStop();
            }
        }

        // Commit state async.
        kafkaConsumer.commitAsync();

        // Sleep for a bit
        sleep(DWELL_TIME_MS);
    } while (!requestStop); // requestStop

    kafkaConsumer.close();
    logger.info("Shutdown consumer {}", clientConfig.getConsumerId());
}
Example 18
Source File: KafkaConsumerManager.java From vertx-kafka-service with Apache License 2.0
private void read() {
    while (!consumer.subscription().isEmpty()) {
        final ConsumerRecords<String, String> records = consumer.poll(60000);
        if (shutdownRequested.get()) {
            executeStopConsumer();
        }
        final Iterator<ConsumerRecord<String, String>> iterator = records.iterator();
        while (iterator.hasNext()) {
            if (shutdownRequested.get()) {
                executeStopConsumer();
                return;
            }
            rateLimiter.ifPresent(limiter -> limiter.acquire());
            final PrometheusMetrics.InProgressMessage inProgressMessage = prometheusMetrics.messageStarted();
            final int phase = phaser.register();
            lastPhase.set(phase);
            final ConsumerRecord<String, String> msg = iterator.next();
            final long offset = msg.offset();
            final long partition = msg.partition();
            unacknowledgedOffsets.add(offset);
            lastReadOffset.set(offset);
            lastCommittedOffset.compareAndSet(0, offset);
            currentPartition.compareAndSet(-1, partition);

            handle(msg.value(), partition, offset, configuration.getMaxRetries(),
                    configuration.getInitialRetryDelaySeconds(), inProgressMessage);

            if (unacknowledgedOffsets.size() >= configuration.getMaxUnacknowledged()
                    || partititionChanged(partition)
                    || tooManyUncommittedOffsets(offset)
                    || commitTimeoutReached()) {
                LOG.info("{}: Got {} unacknowledged messages, waiting for ACKs in order to commit",
                        configuration.getKafkaTopic(), unacknowledgedOffsets.size());
                if (!waitForAcks(phase)) {
                    return;
                }
                commitOffsetsIfAllAcknowledged(offset);
                LOG.info("{}: Continuing message processing on partition {}",
                        configuration.getKafkaTopic(), currentPartition.get());
            }
        }
    }
    LOG.info("{}: ConsumerManager:read exited loop, consuming of messages has ended.",
            configuration.getKafkaTopic());
}
Example 19
Source File: RecordFilterInterceptor.java From kafka-webview with MIT License
@Override
public ConsumerRecords onConsume(final ConsumerRecords records) {
    final Map<TopicPartition, List<ConsumerRecord>> filteredRecords = new HashMap<>();

    // Iterate thru records
    final Iterator<ConsumerRecord> recordIterator = records.iterator();
    while (recordIterator.hasNext()) {
        final ConsumerRecord record = recordIterator.next();

        boolean result = true;

        // Iterate through filters
        for (final RecordFilterDefinition recordFilterDefinition : recordFilterDefinitions) {
            // Pass through filter
            result = recordFilterDefinition.getRecordFilter().includeRecord(
                record.topic(),
                record.partition(),
                record.offset(),
                record.key(),
                record.value()
            );

            // If we return false
            if (!result) {
                // break out of loop
                break;
            }
        }

        // If filter return true
        if (result) {
            // Include it in the results
            final TopicPartition topicPartition = new TopicPartition(record.topic(), record.partition());
            filteredRecords.putIfAbsent(topicPartition, new ArrayList<>());
            filteredRecords.get(topicPartition).add(record);
        }
    }

    // return filtered results
    return new ConsumerRecords(filteredRecords);
}
Example 20
Source File: KafkaEventMessage.java From extension-kafka with Apache License 2.0
/**
 * Construct a {@link KafkaEventMessage} based on the deserialized body, the {@code eventMessage}, of a {@link
 * ConsumerRecord} retrieved from a Kafka topic. The {@code trackingToken} is used to change the {@code
 * eventMessage} in an {@link TrackedEventMessage}.
 *
 * @param eventMessage   the {@link EventMessage} to wrap
 * @param consumerRecord the {@link ConsumerRecord} which the given {@code eventMessage} was the body of
 * @param trackingToken  the {@link KafkaTrackingToken} defining the position of this message
 * @return the {@link KafkaEventMessage} constructed from the given {@code eventMessage}, {@code consumerRecord} and
 *         {@code trackingToken}
 */
public static KafkaEventMessage from(EventMessage<?> eventMessage,
                                     ConsumerRecord<?, ?> consumerRecord,
                                     KafkaTrackingToken trackingToken) {
    return new KafkaEventMessage(
            asTrackedEventMessage(eventMessage, trackingToken),
            consumerRecord.partition(), consumerRecord.offset(), consumerRecord.timestamp()
    );
}