Java Code Examples for org.apache.kafka.clients.consumer.ConsumerRecord#offset()
The following examples show how to use org.apache.kafka.clients.consumer.ConsumerRecord#offset().
Each example notes its source file, originating project, and license.
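As a baseline before the project examples, here is a minimal, self-contained sketch of the pattern most of them follow: poll, read ConsumerRecord#offset() to learn each record's position within its partition, and commit offset() + 1 (the offset of the next record to read). The broker address, group id, and topic name are placeholders, not taken from any of the projects below.

import java.time.Duration;
import java.util.Collections;
import java.util.Properties;

import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.clients.consumer.OffsetAndMetadata;
import org.apache.kafka.common.TopicPartition;

public class OffsetExample {
    public static void main(String[] args) {
        Properties props = new Properties();
        props.put("bootstrap.servers", "localhost:9092"); // placeholder broker
        props.put("group.id", "offset-example");          // placeholder group id
        props.put("enable.auto.commit", "false");         // commit manually below
        props.put("key.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
        props.put("value.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");

        try (KafkaConsumer<String, String> consumer = new KafkaConsumer<>(props)) {
            consumer.subscribe(Collections.singletonList("example-topic")); // placeholder topic
            ConsumerRecords<String, String> records = consumer.poll(Duration.ofSeconds(1));
            for (ConsumerRecord<String, String> record : records) {
                // offset() is the record's position within its topic partition
                System.out.printf("partition=%d offset=%d value=%s%n",
                        record.partition(), record.offset(), record.value());
                // The committed offset is the next record to read, hence offset() + 1
                consumer.commitSync(Collections.singletonMap(
                        new TopicPartition(record.topic(), record.partition()),
                        new OffsetAndMetadata(record.offset() + 1)));
            }
        }
    }
}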
Example 1
Source File: SecorKafkaClient.java From secor with Apache License 2.0
private Message readSingleMessage(KafkaConsumer<byte[], byte[]> kafkaConsumer) {
    int pollAttempts = 0;
    Message message = null;
    while (pollAttempts < MAX_READ_POLL_ATTEMPTS) {
        Iterator<ConsumerRecord<byte[], byte[]>> records =
            kafkaConsumer.poll(Duration.ofSeconds(mPollTimeout)).iterator();
        if (!records.hasNext()) {
            pollAttempts++;
        } else {
            ConsumerRecord<byte[], byte[]> record = records.next();
            List<MessageHeader> headers = new ArrayList<>();
            record.headers().forEach(header -> headers.add(new MessageHeader(header.key(), header.value())));
            message = new Message(record.topic(), record.partition(), record.offset(),
                record.key(), record.value(), record.timestamp(), headers);
            break;
        }
    }
    if (message == null) {
        LOG.warn("unable to fetch message after " + MAX_READ_POLL_ATTEMPTS + " Retries");
    }
    return message;
}
Example 2
Source File: KafkaShortRetentionTestBase.java From flink with Apache License 2.0
@Override
public String deserialize(ConsumerRecord<byte[], byte[]> record) {
    final long offset = record.offset();
    if (offset != nextExpected) {
        numJumps++;
        nextExpected = offset;
        LOG.info("Registered now jump at offset {}", offset);
    }
    nextExpected++;
    try {
        Thread.sleep(10); // slow down data consumption to trigger log eviction
    } catch (InterruptedException e) {
        throw new RuntimeException("Stopping it");
    }
    return "";
}
Example 3
Source File: KafkaOffsetCanalConnector.java From canal-1.1.3 with Apache License 2.0
/**
 * Fetch Kafka messages without acknowledging them.
 *
 * @param timeout
 * @param unit
 * @param offset message offset to seek to (-1 means no seek)
 * @return
 * @throws CanalClientException
 */
public List<KafkaFlatMessage> getFlatListWithoutAck(Long timeout, TimeUnit unit, long offset) throws CanalClientException {
    waitClientRunning();
    if (!running) {
        return Lists.newArrayList();
    }

    if (offset > -1) {
        TopicPartition tp = new TopicPartition(topic, partition == null ? 0 : partition);
        kafkaConsumer2.seek(tp, offset);
    }

    ConsumerRecords<String, String> records = kafkaConsumer2.poll(unit.toMillis(timeout));

    if (!records.isEmpty()) {
        List<KafkaFlatMessage> flatMessages = new ArrayList<>();
        for (ConsumerRecord<String, String> record : records) {
            String flatMessageJson = record.value();
            FlatMessage flatMessage = JSON.parseObject(flatMessageJson, FlatMessage.class);
            KafkaFlatMessage message = new KafkaFlatMessage(flatMessage, record.offset());
            flatMessages.add(message);
        }
        return flatMessages;
    }
    return Lists.newArrayList();
}
Example 4
Source File: FlowLineCheckService.java From DBus with Apache License 2.0
private boolean secondStep(KafkaConsumer<String, byte[]> consumerSecond, long time, Map<String, Boolean> retMap, long offset) {
    boolean isOk = false;
    try {
        long start = System.currentTimeMillis();
        while ((System.currentTimeMillis() - start < 1000 * 20) && !isOk) {
            ConsumerRecords<String, byte[]> records = consumerSecond.poll(1000);
            for (ConsumerRecord<String, byte[]> record : records) {
                if (record.offset() >= offset) {
                    isOk = true;
                    break;
                }
            }
        }
    } catch (Exception e) {
        retMap.put("status", false);
        logger.error("auto check table second step error.", e);
    }
    return isOk;
}
Example 5
Source File: AtMostOnceConsumer.java From javabase with Apache License 2.0
private static void processRecords(KafkaConsumer<String, String> consumer) throws InterruptedException {
    while (true) {
        ConsumerRecords<String, String> records = consumer.poll(100);
        long lastOffset = 0;
        for (ConsumerRecord<String, String> record : records) {
            System.out.printf("\n\roffset = %d, key = %s, value = %s", record.offset(), record.key(), record.value());
            lastOffset = record.offset();
        }
        System.out.println("lastOffset read: " + lastOffset);
        process();
    }
}
Example 6
Source File: FlowLineCheckService.java From DBus with Apache License 2.0
private boolean thirdStep(KafkaConsumer<String, byte[]> consumerThird, long time, Map<String, Boolean> retMap, long offset) {
    boolean isOk = false;
    try {
        long start = System.currentTimeMillis();
        while ((System.currentTimeMillis() - start < 1000 * 20) && !isOk) {
            ConsumerRecords<String, byte[]> records = consumerThird.poll(1000);
            for (ConsumerRecord<String, byte[]> record : records) {
                //if (StringUtils.contains(record.key(), String.valueOf(time))) {
                if (record.offset() >= offset) {
                    isOk = true;
                    break;
                }
            }
        }
    } catch (Exception e) {
        retMap.put("status", false);
        logger.error("auto check table third step error.", e);
    }
    return isOk;
}
Example 7
Source File: OffsetBlockingMessageQueue.java From ja-micro with Apache License 2.0
@Override
public synchronized void processingEnded(KafkaTopicInfo topicInfo) {
    final ConsumerRecord<String, String> record = inProgress.get(topicInfo.getPartition());
    if (record != null && record.offset() == topicInfo.getOffset()) {
        //Processing failed. Schedule retry.
        retryExecutor.schedule(() -> messageExecutor.execute(record),
            retryDelayMillis, TimeUnit.MILLISECONDS);
    }
}
Example 8
Source File: EventuateKafkaAggregateSubscriptions.java From light-eventuate-4j with Apache License 2.0
private SerializedEvent toSerializedEvent(ConsumerRecord<String, String> record) {
    PublishedEvent pe = JSonMapper.fromJson(record.value(), PublishedEvent.class);
    return new SerializedEvent(
            Int128.fromString(pe.getId()),
            pe.getEntityId(),
            pe.getEntityType(),
            pe.getEventData(),
            pe.getEventType(),
            record.partition(),
            record.offset(),
            EtopEventContext.make(pe.getId(), record.topic(), record.partition(), record.offset()),
            pe.getMetadata());
}
Example 9
Source File: DefaultWebKafkaConsumer.java From kafka-webview with MIT License
private List<KafkaResult> consume() {
    final List<KafkaResult> kafkaResultList = new ArrayList<>();
    final ConsumerRecords consumerRecords = kafkaConsumer.poll(pollTimeoutDuration);

    logger.info("Consumed {} records", consumerRecords.count());
    final Iterator<ConsumerRecord> recordIterator = consumerRecords.iterator();
    while (recordIterator.hasNext()) {
        // Get next record
        final ConsumerRecord consumerRecord = recordIterator.next();

        // Convert to KafkaResult.
        final KafkaResult kafkaResult = new KafkaResult(
            consumerRecord.partition(),
            consumerRecord.offset(),
            consumerRecord.timestamp(),
            consumerRecord.key(),
            consumerRecord.value()
        );

        // Add to list.
        kafkaResultList.add(kafkaResult);
    }

    // Commit offsets
    commit();
    return kafkaResultList;
}
Example 10
Source File: CheckFlowLineHandler.java From DBus with Apache License 2.0
private void thirdStep(BufferedWriter bw, KafkaConsumer<String, byte[]> consumer, long offset) throws Exception {
    boolean isOk = false;
    try {
        long start = System.currentTimeMillis();
        while ((System.currentTimeMillis() - start < 1000 * 15) && !isOk) {
            ConsumerRecords<String, byte[]> records = consumer.poll(1000);
            for (ConsumerRecord<String, byte[]> record : records) {
                if (record.offset() >= offset) {
                    isOk = true;
                    bw.write("data arrive at topic: " + record.topic());
                    bw.newLine();
                    break;
                }
            }
        }
    } catch (Exception e) {
        bw.write("auto check table third step error: " + e.getMessage());
        bw.newLine();
        throw new RuntimeException("auto check table third step error", e);
    }

    if (!isOk) {
        bw.write("flow line third step time out");
        bw.newLine();
        throw new RuntimeException("flow line third step time out");
    }
}
Example 11
Source File: KafkaMirrorMakerConnectorTask.java From brooklin with BSD 2-Clause "Simplified" License
@Override
protected DatastreamProducerRecord translate(ConsumerRecord<?, ?> fromKafka, Instant readTime) {
    long eventsSourceTimestamp =
        fromKafka.timestampType() == TimestampType.LOG_APPEND_TIME ? fromKafka.timestamp() : readTime.toEpochMilli();
    HashMap<String, String> metadata = new HashMap<>();
    metadata.put(KAFKA_ORIGIN_CLUSTER, _mirrorMakerSource.getBrokerListString());
    String topic = fromKafka.topic();
    metadata.put(KAFKA_ORIGIN_TOPIC, topic);
    int partition = fromKafka.partition();
    String partitionStr = String.valueOf(partition);
    metadata.put(KAFKA_ORIGIN_PARTITION, partitionStr);
    long offset = fromKafka.offset();
    String offsetStr = String.valueOf(offset);
    metadata.put(KAFKA_ORIGIN_OFFSET, offsetStr);
    metadata.put(BrooklinEnvelopeMetadataConstants.EVENT_TIMESTAMP, String.valueOf(eventsSourceTimestamp));
    BrooklinEnvelope envelope = new BrooklinEnvelope(fromKafka.key(), fromKafka.value(), null, metadata);
    DatastreamProducerRecordBuilder builder = new DatastreamProducerRecordBuilder();
    builder.addEvent(envelope);
    builder.setEventsSourceTimestamp(eventsSourceTimestamp);
    builder.setSourceCheckpoint(new KafkaMirrorMakerCheckpoint(topic, partition, offset).toString());
    builder.setDestination(_datastreamTask.getDatastreamDestination()
        .getConnectionString()
        .replace(KafkaMirrorMakerConnector.MM_TOPIC_PLACEHOLDER,
            StringUtils.isBlank(_destinationTopicPrefix) ? topic : _destinationTopicPrefix + topic));
    if (_isIdentityMirroringEnabled) {
        builder.setPartition(partition);
    }
    return builder.build();
}
Example 12
Source File: ConsumerLease.java From nifi with Apache License 2.0
private BundleTracker(final ConsumerRecord<byte[], byte[]> initialRecord, final TopicPartition topicPartition,
        final String keyEncoding, final RecordSetWriter recordWriter) {
    this.initialOffset = initialRecord.offset();
    this.initialTimestamp = initialRecord.timestamp();
    this.partition = topicPartition.partition();
    this.topic = topicPartition.topic();
    this.recordWriter = recordWriter;
    this.key = encodeKafkaKey(initialRecord.key(), keyEncoding);
}
Example 13
Source File: SubscriptionManager.java From kafka-pubsub-emulator with Apache License 2.0
/**
 * Fills the {@code returnedMessages} List with up to {@code maxMessages} Message objects by
 * retrieving ConsumerRecords from the head of the buffered queue.
 */
private void fillFromBuffer(List<PubsubMessage> returnedMessages, int maxMessages) {
    ConsumerRecord<String, ByteBuffer> record;
    int dequeued = 0;
    String messageId;
    while (returnedMessages.size() < maxMessages && !buffer.isEmpty()) {
        try {
            record = buffer.remove();
            dequeued++;
            messageId = record.partition() + "-" + record.offset();
            queueSizeBytes.addAndGet(-record.serializedValueSize());
            returnedMessages.add(
                PubsubMessage.newBuilder()
                    .putAllAttributes(buildAttributesMap(record.headers()))
                    .setData(ByteString.copyFrom(record.value()))
                    .setMessageId(messageId)
                    .setPublishTime(
                        Timestamp.newBuilder()
                            .setSeconds(record.timestamp() / 1000)
                            .setNanos((int) ((record.timestamp() % 1000) * 1000000))
                            .build())
                    .build());
        } catch (NoSuchElementException e) {
            break;
        }
    }
    logger.atFine().log("Dequeued %d messages from buffer", dequeued);
}
Example 14
Source File: ConsumerIterator.java From uReplicator with Apache License 2.0
@Override
public ConsumerRecord makeNext() {
    Iterator<ConsumerRecord> localCurrent = current.get();
    // if we don't have an iterator, get one
    if (localCurrent == null || !localCurrent.hasNext() || consumedEndBounded()) {
        FetchedDataChunk currentChuck;
        try {
            if (consumerTimeoutMs < 0) {
                currentChuck = channel.take();
            } else {
                currentChuck = channel.poll(consumerTimeoutMs, TimeUnit.MILLISECONDS);
                if (currentChuck == null) {
                    resetState();
                    throw new ConsumerTimeoutException();
                }
            }
        } catch (InterruptedException e) {
            LOGGER.error("Error poll from channel.", e);
            resetState();
            throw new RuntimeException(e.getMessage(), e);
        }
        localCurrent = currentChuck.consumerRecords().iterator();
        currentPartitionInfo = currentChuck.partitionOffsetInfo();
        current.set(localCurrent);
    }
    ConsumerRecord item = localCurrent.next();
    while (item.offset() < currentPartitionInfo.consumeOffset() && localCurrent.hasNext()) {
        item = localCurrent.next();
    }
    nextOffset = item.offset() + 1;
    return item;
}
Example 15
Source File: Kafka09ConsumerClient.java From incubator-gobblin with Apache License 2.0
public Kafka09ConsumerRecord(ConsumerRecord<K, V> consumerRecord) {
    // Kafka 09 consumerRecords do not provide value size.
    // Only 08 and 10 versions provide them.
    super(consumerRecord.offset(), BaseKafkaConsumerRecord.VALUE_SIZE_UNAVAILABLE,
        consumerRecord.topic(), consumerRecord.partition());
    this.consumerRecord = consumerRecord;
}
Example 16
Source File: PartitionManager.java From kafka-spark-consumer with Apache License 2.0
private void fill() {
    String topic = _kafkaconfig._stateConf.get(Config.KAFKA_TOPIC);
    ConsumerRecords<byte[], byte[]> msgs;
    // FetchSize controls the number of messages pulled every poll from Kafka
    int fetchSize = getFetchSize();
    // Total messages fetched from Kafka
    int recordCount = 0;
    // Fetch messages from Kafka
    msgs = fetchMessages(topic);
    for (ConsumerRecord<byte[], byte[]> msgAndOffset : msgs) {
        if (msgAndOffset != null) {
            if (recordCount == fetchSize) {
                // Reached the poll limit
                break;
            }
            long offset = msgAndOffset.offset();
            byte[] key = msgAndOffset.key();
            byte[] payload = msgAndOffset.value();
            // Get Kafka message headers
            _lastEnquedOffset = offset;
            _emittedToOffset = offset + 1;
            // Process only when fetched messages have a higher offset than the last committed offset
            if (_lastEnquedOffset >= _lastComittedOffset) {
                if (payload != null) {
                    MessageAndMetadata<?> mm = null;
                    try {
                        // Perform Message Handling if configured.
                        mm = _handler.handle(_lastEnquedOffset, _partition, _topic, _consumerId, payload);
                        if (key != null) {
                            mm.setKey(key);
                        }
                    } catch (Exception e) {
                        LOG.error("Process Failed for offset {} partition {} topic {}", key, _partition, _topic, e);
                    }
                    if (_kafkaconfig._numFetchToBuffer > 1) {
                        // Add to buffer
                        if (mm != null) {
                            _arrayBuffer.add(mm);
                            _numFetchBuffered = _numFetchBuffered + 1;
                        }
                        // Trigger write when buffer reaches the limit
                        LOG.debug("number of fetch buffered for partition {} is {}", _partition.partition, _numFetchBuffered);
                        if (_numFetchBuffered > _kafkaconfig._numFetchToBuffer) {
                            triggerBlockManagerWrite();
                            LOG.debug("Trigger BM write till offset {} for Partition {}", _lastEnquedOffset, _partition.partition);
                        }
                    } else {
                        // Nothing to buffer. Just add to Spark Block Manager
                        try {
                            synchronized (_receiver) {
                                if (mm != null) {
                                    _receiver.store(mm);
                                    _lastComittedOffset = _emittedToOffset;
                                    LOG.debug("PartitionManager sucessfully written offset {} for partition {} to BM",
                                        _lastEnquedOffset, _partition.partition);
                                }
                            }
                        } catch (Exception ex) {
                            _receiver.reportError("Retry Store for Partition " + _partition, ex);
                        }
                    }
                }
            }
        }
        recordCount++;
    }
}
Example 17
Source File: MapRStreamsConsumer09.java From datacollector with Apache License 2.0
@Override
MessageAndOffset getMessageAndOffset(ConsumerRecord message, boolean isEnabled) {
    return new MessageAndOffset(message.key(), message.value(), message.offset(), message.partition());
}
Example 18
Source File: DBusConsumerRecord.java From DBus with Apache License 2.0
public DBusConsumerRecord(ConsumerRecord<K, V> record) {
    this(record.topic(), record.partition(), record.offset(), record.timestamp(), record.timestampType(),
        record.checksum(), record.serializedKeySize(), record.serializedValueSize(), record.key(), record.value());
}
Example 19
Source File: KafkaEventMessage.java From extension-kafka with Apache License 2.0
/**
 * Construct a {@link KafkaEventMessage} based on the deserialized body, the {@code eventMessage}, of a {@link
 * ConsumerRecord} retrieved from a Kafka topic. The {@code trackingToken} is used to turn the {@code
 * eventMessage} into a {@link TrackedEventMessage}.
 *
 * @param eventMessage   the {@link EventMessage} to wrap
 * @param consumerRecord the {@link ConsumerRecord} which the given {@code eventMessage} was the body of
 * @param trackingToken  the {@link KafkaTrackingToken} defining the position of this message
 * @return the {@link KafkaEventMessage} constructed from the given {@code eventMessage}, {@code consumerRecord} and
 * {@code trackingToken}
 */
public static KafkaEventMessage from(EventMessage<?> eventMessage,
                                     ConsumerRecord<?, ?> consumerRecord,
                                     KafkaTrackingToken trackingToken) {
    return new KafkaEventMessage(
            asTrackedEventMessage(eventMessage, trackingToken),
            consumerRecord.partition(), consumerRecord.offset(), consumerRecord.timestamp()
    );
}