Java Code Examples for org.apache.kafka.clients.consumer.OffsetAndMetadata#offset()
The following examples show how to use org.apache.kafka.clients.consumer.OffsetAndMetadata#offset().
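Before the project examples, here is a minimal, self-contained sketch of the basic call pattern: look up a consumer group's committed position with Consumer#committed and read the numeric offset from it with OffsetAndMetadata#offset(). The bootstrap server, topic name, and group id below are placeholders, not taken from any of the projects that follow.

import java.util.Collections;
import java.util.Properties;

import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.clients.consumer.OffsetAndMetadata;
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.common.serialization.StringDeserializer;

public class CommittedOffsetExample {

    public static void main(String[] args) {
        Properties props = new Properties();
        props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");
        props.put(ConsumerConfig.GROUP_ID_CONFIG, "example-group");
        props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());
        props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());

        TopicPartition partition = new TopicPartition("example-topic", 0);

        try (KafkaConsumer<String, String> consumer = new KafkaConsumer<>(props)) {
            consumer.assign(Collections.singleton(partition));

            // committed() returns null if the group has never committed an offset for this partition
            OffsetAndMetadata committed = consumer.committed(partition);
            if (committed == null) {
                System.out.println("No committed offset for " + partition);
            } else {
                // offset() is the position of the next record to consume, not the last record consumed
                System.out.println("Committed offset for " + partition + ": " + committed.offset());
            }
        }
    }
}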
Example 1
Source File: DefaultOffsetsState.java From kafka-workers with Apache License 2.0
private void removeCommitted(TopicPartition partition, OffsetAndMetadata offsetAndMetadata) {
    ConsumedOffsets consumedOffsets = consumedOffsetsMap.get(partition);
    SortedRanges processedOffsets = processedOffsetsMap.get(partition);
    if (consumedOffsets == null || processedOffsets == null) {
        logger.warn("Aborting removeCommitted for partition [{}] (partition probably unregistered)", partition);
        return;
    }
    synchronized (consumedOffsets) {
        synchronized (processedOffsets) {
            long maxOffsetToRemove = offsetAndMetadata.offset() - 1;
            consumedOffsets.removeElementsLowerOrEqual(maxOffsetToRemove);
            processedOffsets.removeElementsLowerOrEqual(maxOffsetToRemove);
        }
    }
    computeMetricInfo(partition);
}
Example 2
Source File: LiKafkaConsumerImpl.java From li-apache-kafka-clients with BSD 2-Clause "Simplified" License
@Override
public void seekToCommitted(Collection<TopicPartition> partitions) {
    // current offsets are being moved so don't throw cached exceptions in poll.
    clearRecordProcessingException();

    for (TopicPartition tp : partitions) {
        OffsetAndMetadata offsetAndMetadata = _kafkaConsumer.committed(tp);
        if (offsetAndMetadata == null) {
            throw new NoOffsetForPartitionException(tp);
        }
        _kafkaConsumer.seek(tp, offsetAndMetadata.offset());
        _consumerRecordsProcessor.clear(tp);
        Long hw = LiKafkaClientsUtils.offsetFromWrappedMetadata(offsetAndMetadata.metadata());
        if (hw == null) {
            hw = offsetAndMetadata.offset();
        }
        _consumerRecordsProcessor.setPartitionConsumerHighWaterMark(tp, hw);
    }
}
Example 3
Source File: KafkaSpout.java From storm_spring_boot_demo with MIT License
/**
 * Sets the cursor to the location dictated by the first poll strategy and returns the fetch offset.
 */
private long doSeek(TopicPartition tp, OffsetAndMetadata committedOffset) {
    long fetchOffset;
    if (committedOffset != null) {
        // offset was committed for this TopicPartition
        if (firstPollOffsetStrategy.equals(EARLIEST)) {
            kafkaConsumer.seekToBeginning(Collections.singleton(tp));
            fetchOffset = kafkaConsumer.position(tp);
        } else if (firstPollOffsetStrategy.equals(LATEST)) {
            kafkaConsumer.seekToEnd(Collections.singleton(tp));
            fetchOffset = kafkaConsumer.position(tp);
        } else {
            // By default polling starts at the last committed offset. +1 to point fetch to the first uncommitted offset.
            fetchOffset = committedOffset.offset() + 1;
            kafkaConsumer.seek(tp, fetchOffset);
        }
    } else {
        // no commits have ever been done, so start at the beginning or end depending on the strategy
        if (firstPollOffsetStrategy.equals(EARLIEST) || firstPollOffsetStrategy.equals(UNCOMMITTED_EARLIEST)) {
            kafkaConsumer.seekToBeginning(Collections.singleton(tp));
        } else if (firstPollOffsetStrategy.equals(LATEST) || firstPollOffsetStrategy.equals(UNCOMMITTED_LATEST)) {
            kafkaConsumer.seekToEnd(Collections.singleton(tp));
        }
        fetchOffset = kafkaConsumer.position(tp);
    }
    return fetchOffset;
}
Example 4
Source File: ProcessingKafkaConsumerITest.java From common-kafka with Apache License 2.0
@Override
protected void commitOffsets(Map<TopicPartition, OffsetAndMetadata> offsetsToCommit) throws KafkaException {
    super.commitOffsets(offsetsToCommit);

    // Record the commit in the history for all applicable records
    for (RecordId recordId : recordsToBeCommitted) {
        OffsetAndMetadata offset = offsetsToCommit.get(new TopicPartition(recordId.topic, recordId.partition));
        if (offset != null && offset.offset() > recordId.offset) {
            recordsToBeCommitted.remove(recordId);

            // Delay history recording if there is an ack in progress so that we can verify ack/commit order
            if (ackInProgress) {
                recordsCommittedDuringAck.add(recordId);
            } else {
                addRecordHistory(recordId, Action.COMMITTED);
            }
        }
    }
}
Example 5
Source File: ConsumerTest.java From kbear with Apache License 2.0
protected void commitSync(
        java.util.function.BiConsumer<Consumer<String, String>, Map<TopicPartition, OffsetAndMetadata>> committer)
        throws InterruptedException {
    produceMessages();

    try (Consumer<String, String> consumer = createConsumerWithoutAutoCommit()) {
        consumer.subscribe(_topics);
        pollDurationTimeout(consumer);

        OffsetAndMetadata committed = consumer.committed(_topicPartition);
        System.out.println("committed: " + committed);
        OffsetAndMetadata committed2 = new OffsetAndMetadata(committed.offset() + _messageCount, committed.metadata());
        System.out.println("committed2: " + committed2);
        Map<TopicPartition, OffsetAndMetadata> offsetMap = new HashMap<>();
        offsetMap.put(_topicPartition, committed2);
        committer.accept(consumer, offsetMap);

        OffsetAndMetadata committed3 = consumer.committed(_topicPartition);
        System.out.println("committed3: " + committed3);
        Assert.assertEquals(committed2.offset(), committed3.offset());
    }
}
Example 6
Source File: LiKafkaConsumerImpl.java From li-apache-kafka-clients with BSD 2-Clause "Simplified" License
@Override
public Long committedSafeOffset(TopicPartition tp) {
    OffsetAndMetadata rawOffsetAndMetadata = _kafkaConsumer.committed(tp);
    if (rawOffsetAndMetadata == null || rawOffsetAndMetadata.metadata().isEmpty()) {
        return null;
    }
    return rawOffsetAndMetadata.offset();
}
Example 7
Source File: NewApiTopicConsumer.java From jeesuite-libs with Apache License 2.0
/**
 * Reset offsets to the last recorded positions.
 */
private void resetCorrectOffsets(ConsumerWorker worker) {
    KafkaConsumer<String, Serializable> consumer = worker.consumer;
    Map<String, List<PartitionInfo>> topicInfos = consumer.listTopics();
    Set<String> topics = topicInfos.keySet();

    List<String> expectTopics = new ArrayList<>(topicHandlers.keySet());

    List<PartitionInfo> patitions = null;

    consumer.poll(200);

    for (String topic : topics) {
        if (!expectTopics.contains(topic)) {
            continue;
        }
        patitions = topicInfos.get(topic);
        for (PartitionInfo partition : patitions) {
            try {
                // the expected offset
                long expectOffsets = consumerContext.getLatestProcessedOffsets(topic, partition.partition());
                TopicPartition topicPartition = new TopicPartition(partition.topic(), partition.partition());
                OffsetAndMetadata metadata = consumer.committed(topicPartition);

                Set<TopicPartition> assignment = consumer.assignment();
                if (assignment.contains(topicPartition)) {
                    if (expectOffsets > 0 && expectOffsets < metadata.offset()) {
                        consumer.seek(topicPartition, expectOffsets);
                        //consumer.seekToBeginning(assignment);
                        logger.info(">>>>>>> seek Topic[{}] partition[{}] from {} to {}", topic,
                                partition.partition(), metadata.offset(), expectOffsets);
                    }
                }
            } catch (Exception e) {
                logger.warn("try seek topic[" + topic + "] partition[" + partition.partition() + "] offsets error");
            }
        }
    }
    consumer.resume(consumer.assignment());
}
Example 8
Source File: Consumer.java From ja-micro with Apache License 2.0
private void checkIfRefreshCommitRequired() {
    // Here's the issue:
    // The retention of __consumer_offsets is shorter than that of most topics, so we need to re-commit regularly
    // to keep the last committed offset per consumer group. This is especially an issue in cases where we have
    // bursty / little traffic.

    Map<TopicPartition, OffsetAndMetadata> commitOffsets = new HashMap<>();
    long now = System.currentTimeMillis();

    if (nextCommitRefreshRequiredTimestamp < now) {
        nextCommitRefreshRequiredTimestamp = now + COMMIT_REFRESH_INTERVAL_MILLIS;

        for (PartitionProcessor processor : partitions.allProcessors()) {
            TopicPartition assignedPartition = processor.getAssignedPartition();
            long lastCommittedOffset = processor.getLastCommittedOffset();

            // We haven't committed from this partition yet
            if (lastCommittedOffset < 0) {
                OffsetAndMetadata offset = kafka.committed(assignedPartition);
                if (offset == null) {
                    // there was no commit on this partition at all
                    continue;
                }
                lastCommittedOffset = offset.offset();
                processor.forceSetLastCommittedOffset(lastCommittedOffset);
            }

            commitOffsets.put(assignedPartition, new OffsetAndMetadata(lastCommittedOffset));
        }

        kafka.commitSync(commitOffsets);
        logger.info("Refreshing last committed offset {}", commitOffsets);
    }
}
Example 9
Source File: KafkaSubscriberTest.java From ja-micro with Apache License 2.0
@Test
public void subscriberLosesPartitionAssignment() {
    KafkaSubscriber<String> subscriber = new KafkaSubscriber<>(new MessageCallback(),
            "topic", "groupId", false,
            KafkaSubscriber.OffsetReset.Earliest, 1, 1, 1,
            5000, 5000, KafkaSubscriber.QueueType.OffsetBlocking, 1000);
    KafkaTopicInfo message1 = new KafkaTopicInfo("topic", 0, 1, null);
    KafkaTopicInfo message2 = new KafkaTopicInfo("topic", 0, 2, null);
    KafkaTopicInfo message3 = new KafkaTopicInfo("topic", 1, 1, null);
    KafkaTopicInfo message4 = new KafkaTopicInfo("topic", 1, 2, null);
    subscriber.consume(message1);
    subscriber.consume(message2);
    subscriber.consume(message3);
    subscriber.consume(message4);
    KafkaConsumer realConsumer = mock(KafkaConsumer.class);
    class ArgMatcher implements ArgumentMatcher<Map<TopicPartition, OffsetAndMetadata>> {
        @Override
        public boolean matches(Map<TopicPartition, OffsetAndMetadata> arg) {
            OffsetAndMetadata oam = arg.values().iterator().next();
            return oam.offset() == 3;
        }
    }
    doThrow(new CommitFailedException()).when(realConsumer).commitSync(argThat(new ArgMatcher()));
    subscriber.realConsumer = realConsumer;
    subscriber.offsetCommitter = new OffsetCommitter(realConsumer, Clock.systemUTC());
    subscriber.consumeMessages();
}
Example 10
Source File: NewApiTopicConsumer.java From azeroth with Apache License 2.0
/**
 * Reset offsets to the last recorded positions.
 */
private void resetCorrectOffsets() {
    consumer.pause(consumer.assignment());
    Map<String, List<PartitionInfo>> topicInfos = consumer.listTopics();
    Set<String> topics = topicInfos.keySet();

    List<String> expectTopics = new ArrayList<>(topicHandlers.keySet());

    List<PartitionInfo> patitions = null;
    for (String topic : topics) {
        if (!expectTopics.contains(topic)) {
            continue;
        }
        patitions = topicInfos.get(topic);
        for (PartitionInfo partition : patitions) {
            try {
                // the expected offset
                long expectOffsets = consumerContext.getLatestProcessedOffsets(topic, partition.partition());
                TopicPartition topicPartition = new TopicPartition(topic, partition.partition());
                OffsetAndMetadata metadata = consumer
                        .committed(new TopicPartition(partition.topic(), partition.partition()));
                if (expectOffsets >= 0) {
                    if (expectOffsets < metadata.offset()) {
                        consumer.seek(topicPartition, expectOffsets);
                        logger.info("seek Topic[{}] partition[{}] from {} to {}", topic,
                                partition.partition(), metadata.offset(), expectOffsets);
                    }
                }
            } catch (Exception e) {
                logger.warn("try seek topic[" + topic + "] partition[" + partition.partition()
                        + "] offsets error");
            }
        }
    }
    consumer.resume(consumer.assignment());
}
Example 11
Source File: ConsumerTest.java From kbear with Apache License 2.0
@Test
public void commitAsyncWithOffsetsAndCallback() throws InterruptedException {
    produceMessages();

    try (Consumer<String, String> consumer = createConsumerWithoutAutoCommit()) {
        consumer.subscribe(_topics);
        pollDurationTimeout(consumer);

        OffsetAndMetadata committed = consumer.committed(_topicPartition);
        System.out.println("committed: " + committed);
        OffsetAndMetadata committed2 = new OffsetAndMetadata(committed.offset() + _messageCount, committed.metadata());
        System.out.println("committed2: " + committed2);
        Map<TopicPartition, OffsetAndMetadata> offsetMap = new HashMap<>();
        offsetMap.put(_topicPartition, committed2);

        CountDownLatch latch = new CountDownLatch(offsetMap.size());
        AtomicBoolean failed = new AtomicBoolean();
        consumer.commitAsync(offsetMap, (m, e) -> {
            if (e != null) {
                failed.set(true);
                e.printStackTrace();
            } else {
                System.out.println("offsetsMap: " + m);
            }
            latch.countDown();
        });

        long timeout = 30 * 1000;
        Thread.sleep(timeout);

        _messageCount = 0;
        _pollTimeout = 100;
        pollDurationTimeout(consumer); // the callback is invoked on the next poll or commit
        if (!latch.await(1000, TimeUnit.MILLISECONDS)) {
            Assert.fail("commitAsyncWithCallback wait timeout: " + timeout);
        }
        Assert.assertFalse(failed.get());

        OffsetAndMetadata committed3 = consumer.committed(_topicPartition);
        System.out.println("committed3: " + committed3);
        Assert.assertEquals(committed2.offset(), committed3.offset());
    }
}
Example 12
Source File: KafkaTestEnvironmentImpl.java From flink with Apache License 2.0
@Override
public Long getCommittedOffset(String topicName, int partition) {
    OffsetAndMetadata committed = offsetClient.committed(new TopicPartition(topicName, partition));
    return (committed != null) ? committed.offset() : null;
}
Example 13
Source File: KafkaTestEnvironmentImpl.java From Flink-CEPplus with Apache License 2.0
@Override
public Long getCommittedOffset(String topicName, int partition) {
    OffsetAndMetadata committed = offsetClient.committed(new TopicPartition(topicName, partition));
    return (committed != null) ? committed.offset() : null;
}
Example 14
Source File: ProcessingPartition.java From common-kafka with Apache License 2.0
/**
 * Returns the offset to use as the partition's last committed offset. This value indicates where we will start
 * reading on the partition and is used to determine what offset is eligible for committing and where to rewind to
 * re-read a message. The returned offset is ensured to be committed, if {@link ProcessingConfig#getCommitInitialOffset()
 * allowed} by the configuration.
 *
 * @return the offset to use as the partition's last committed offset
 *
 * @throws IllegalStateException if an error occurs looking up the consumer's committed offset or broker offset values
 */
protected long getLastCommittedOffset() {
    OffsetAndMetadata lastCommittedOffset;
    try {
        lastCommittedOffset = consumer.committed(topicPartition);
    } catch (KafkaException e) {
        throw new IllegalStateException("Unable to retrieve committed offset for topic partition ["
                + topicPartition + "]", e);
    }

    // If we don't have a committed offset use the reset offset
    if (lastCommittedOffset == null) {
        LOGGER.debug("No offset committed for partition [{}]", topicPartition);
        return getCommittedResetOffset();
    }

    long offset = lastCommittedOffset.offset();
    LOGGER.debug("Last committed offset for partition [{}] is [{}]", topicPartition, offset);

    long startOffset = getEarliestOffset();

    // Verify our committed offset is not before the earliest offset
    if (offset < startOffset) {
        // If our offset is before the start offset this likely means processing was stopped/stalled for so long
        // messages fell off the queue and we failed to process them
        if (LOGGER.isErrorEnabled())
            LOGGER.error("Committed offset [{}] is before earliest offset [{}] for partition [{}]. This likely"
                    + " indicates that processing missed messages", offset, startOffset, topicPartition);

        // If it's not in range use the reset offset as that's where we will be starting from
        return getCommittedResetOffset();
    }

    long endOffset = getLatestOffset();

    // Verify our committed offset is not after the latest offset
    if (offset > endOffset) {
        if (LOGGER.isWarnEnabled())
            LOGGER.warn("Committed offset [{}] is after latest offset [{}] for partition [{}]. This could indicate "
                    + "a bug in the ProcessingConsumer, a topic being re-created or something else updating offsets",
                    offset, endOffset, topicPartition);

        // If it's not in range use the reset offset as that's where we will be starting from
        return getCommittedResetOffset();
    }

    if (LOGGER.isDebugEnabled())
        LOGGER.debug("Using committed offset [{}] for partition [{}] as it is in range of start [{}] / end [{}] broker offsets",
                offset, topicPartition, startOffset, endOffset);

    return offset;
}
Example 15
Source File: PostgreSQLSinkTask.java From kafka-sink-pg-json with MIT License
/**
 * Flushes content to the database
 * @param offsets map of offsets being flushed
 * @throws ConnectException if flush failed
 */
@Override
public void flush(Map<TopicPartition, OffsetAndMetadata> offsets) throws ConnectException {
    fLog.trace("Flush start at " + System.currentTimeMillis());
    try {
        if (iDelivery > FASTEST) // if guaranteed or synchronized
            iWriter.flush(); // flush table writes
        if (iDelivery == SYNCHRONIZED) { // if synchronized delivery
            /* create topic, partition and offset arrays for database flush function call */
            int size = offsets.size(); // get number of flush map entries
            String[] topicArray = new String[size]; // create array for topics
            Integer[] partitionArray = new Integer[size]; // create array for partitions
            Long[] offsetArray = new Long[size]; // create array for offsets
            /* populate topic, partition and offset arrays */
            Iterator<Map.Entry<TopicPartition, OffsetAndMetadata>> iterator = offsets.entrySet().iterator(); // create map iterator
            for (int i = 0; i < size; ++i) { // for each flush map entry
                Entry<TopicPartition, OffsetAndMetadata> entry = iterator.next(); // get next entry
                TopicPartition key = entry.getKey(); // get topic partition key
                OffsetAndMetadata value = entry.getValue(); // get offset value
                topicArray[i] = key.topic(); // put topic into array
                partitionArray[i] = key.partition(); // put partition into array
                offsetArray[i] = value.offset(); // put offset into array
            }
            /* bind arrays to flush statement */
            iFlushStatement.setArray(1, iConnection.createArrayOf("varchar", topicArray)); // bind topic array
            iFlushStatement.setArray(2, iConnection.createArrayOf("integer", partitionArray)); // bind partition array
            iFlushStatement.setArray(3, iConnection.createArrayOf("bigint", offsetArray)); // bind offset array
            /* execute the database flush function */
            iFlushStatement.executeQuery();
        }
    } catch (SQLException | IOException exception) {
        throw new ConnectException(exception);
    }
    fLog.trace("Flush stop at " + System.currentTimeMillis());
}
Example 16
Source File: ConsumerRecordsProcessor.java From li-apache-kafka-clients with BSD 2-Clause "Simplified" License
/**
 * @param messageAssembler non-null. Assembles large message segments.
 * @param keyDeserializer non-null.
 * @param valueDeserializer non-null.
 * @param deliveredMessageOffsetTracker non-null. Keeps a history of safe offsets.
 * @param auditor may be null, in which case auditing is disabled; otherwise the auditor is called when messages are complete.
 * @param storedOffset non-null. A function that returns the offset information stored in Kafka. This may
 *                     be a blocking call and should return null if the information is not available.
 */
public ConsumerRecordsProcessor(MessageAssembler messageAssembler,
                                Deserializer<K> keyDeserializer,
                                Deserializer<V> valueDeserializer,
                                DeliveredMessageOffsetTracker deliveredMessageOffsetTracker,
                                Auditor<K, V> auditor,
                                Function<TopicPartition, OffsetAndMetadata> storedOffset) {
    _messageAssembler = messageAssembler;
    _keyDeserializer = keyDeserializer;
    _valueDeserializer = valueDeserializer;
    _deliveredMessageOffsetTracker = deliveredMessageOffsetTracker;
    _auditor = auditor;
    _partitionConsumerHighWatermark = new HashMap<>();
    if (_auditor == null) {
        LOG.info("Auditing is disabled because no auditor is defined.");
    }
    _storedConsumerHighWatermark = (topicPartition) -> {
        OffsetAndMetadata offsetAndMetadata = storedOffset.apply(topicPartition);
        Long consumerHighWatermark = null;
        if (offsetAndMetadata != null) {
            consumerHighWatermark = LiKafkaClientsUtils.offsetFromWrappedMetadata(offsetAndMetadata.metadata());
        }
        if (consumerHighWatermark == null) {
            return new ConsumerHighWatermarkState();
        }
        return new ConsumerHighWatermarkState(consumerHighWatermark, offsetAndMetadata.offset());
    };
    // Performance optimization to avoid a call to instanceof for every deserialization call
    if (_valueDeserializer instanceof ExtendedDeserializer) {
        _deserializeStrategy = new DeserializeStrategy<V>() {
            @Override
            public V deserialize(String topic, Headers headers, byte[] data) {
                return ((ExtendedDeserializer<V>) _valueDeserializer).deserialize(topic, headers, data);
            }
        };
    } else {
        _deserializeStrategy = new DeserializeStrategy<V>() {
            @Override
            public V deserialize(String topic, Headers headers, byte[] data) {
                return _valueDeserializer.deserialize(topic, data);
            }
        };
    }
}
Example 17
Source File: ConsumerOffsetClient.java From common-kafka with Apache License 2.0
/**
 * Returns the committed offset, or -1 if there is none, for the consumer group and the given topic partition
 *
 * @param topicPartition a topic partition
 * @return the committed offset, or -1 if there is none, for the consumer group and the given topic partition
 * @throws org.apache.kafka.common.KafkaException if there is an issue fetching the committed offset
 * @throws IllegalArgumentException if topicPartition is null
 */
public long getCommittedOffset(TopicPartition topicPartition) {
    if (topicPartition == null)
        throw new IllegalArgumentException("topicPartition cannot be null");

    OffsetAndMetadata offsetAndMetadata = consumer.committed(topicPartition);
    return offsetAndMetadata == null ? -1L : offsetAndMetadata.offset();
}
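A final note: the single-partition Consumer#committed(TopicPartition) overload used throughout these examples was deprecated in Kafka clients 2.4 in favor of a batch variant that takes a set of partitions and returns a map. Below is a minimal sketch of the equivalent lookup against the newer API; the consumer variable and topic name are placeholders.

// Batch lookup of committed offsets on Kafka clients 2.4 and later.
// `consumer` is an already-configured KafkaConsumer; "example-topic" is a placeholder.
Set<TopicPartition> partitions = Collections.singleton(new TopicPartition("example-topic", 0));
Map<TopicPartition, OffsetAndMetadata> committedOffsets = consumer.committed(partitions);
for (Map.Entry<TopicPartition, OffsetAndMetadata> entry : committedOffsets.entrySet()) {
    // The map value is null for partitions that have no committed offset
    OffsetAndMetadata oam = entry.getValue();
    System.out.println(entry.getKey() + " -> " + (oam == null ? "none" : oam.offset()));
}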