org.apache.kafka.clients.consumer.CommitFailedException Java Examples
The following examples show how to use
org.apache.kafka.clients.consumer.CommitFailedException.
You can vote up the examples you find useful or vote down those you don't,
and follow the links above each example to visit the original project or source file. You may also check out the related API usage on the sidebar.
Example #1
Source File: ProcessingKafkaConsumer.java From common-kafka with Apache License 2.0 | 6 votes |
/**
 * Commits all committable offsets.
 *
 * <p>If a previous commit attempt failed, commits stay paused until the next
 * {@code poll()} re-establishes a valid consumer position.
 *
 * @throws KafkaException if there is an issue committing offsets to Kafka
 */
public synchronized void commitOffsets() {
    // A prior CommitFailedException disables commits until poll() runs again.
    if (pauseCommit) {
        LOGGER.debug("Commits are paused until we poll() again");
        return;
    }
    LOGGER.debug("committing offsets");
    try {
        commitOffsets(getCommittableOffsets());
    } catch (CommitFailedException failure) {
        // Remember the failure so subsequent calls short-circuit, then rethrow
        // so the caller also sees the error.
        LOGGER.debug("Failed to commit offsets, pausing commits until next poll", failure);
        pauseCommit = true;
        throw failure;
    }
}
Example #2
Source File: OffsetCommitter.java From ja-micro with Apache License 2.0 | 6 votes |
/**
 * Periodically re-commits already-committed offsets for idle partitions so the
 * broker does not expire the consumer group's offsets during long quiet spells.
 *
 * <p>Runs only when at least {@code IDLE_DURATION} has passed since the last
 * update; each partition is re-committed only if it has itself been idle that long.
 */
public void recommitOffsets() {
    LocalDateTime now = LocalDateTime.now(clock);
    if (!now.isAfter(lastUpdateTime.plus(IDLE_DURATION))) {
        return;
    }
    for (TopicPartition tp : offsetData.keySet()) {
        OffsetAndTime offsetAndTime = offsetData.get(tp);
        if (now.isAfter(offsetAndTime.time.plus(IDLE_DURATION))) {
            try {
                consumer.commitSync(Collections.singletonMap(tp,
                        new OffsetAndMetadata(offsetAndTime.offset)));
            } catch (CommitFailedException e) {
                // Fix: use a descriptive variable name and pass the exception as the
                // final SLF4J argument so the cause (e.g. a re-balance) is logged.
                logger.info("Caught CommitFailedException attempting to commit {} {}",
                        tp, offsetAndTime.offset, e);
            }
            offsetAndTime.time = now;
        }
    }
    lastUpdateTime = now;
}
Example #3
Source File: MyConsumer.java From ad with Apache License 2.0 | 5 votes |
/** * 手动同步提交消息位移 */ private static void generalConsumerMessageSyncCommit() { properties.put("auto.commit.offset", "false"); consumer = new KafkaConsumer<>(properties); consumer.subscribe(Collections.singleton("kafka-topic")); try { while (true) { boolean flag = true; ConsumerRecords<String, String> records = consumer.poll(100); for (ConsumerRecord<String, String> record : records) { log.debug(String.format("topic = %s, partition = %s, key = %s, value = %s", record.topic(), record.partition(), record.key(), record.value()) ); if (StringUtils.endsWithIgnoreCase("done", record.value())) { flag = false; } } try { // 发起提交, 当前线程会阻塞, 如果发生异常会进行重试直到成功或者抛出 CommitFailedException consumer.commitSync(); } catch (CommitFailedException e) { log.error("commit failed error: {}", e.getMessage()); } if (!flag) { break; } } } finally { consumer.close(); } }
Example #4
Source File: KafkaSubscriberTest.java From ja-micro with Apache License 2.0 | 5 votes |
@Test
public void subscriberLosesPartitionAssignment() {
    // Subscriber under test: offset-blocking queue, single-threaded, Earliest reset.
    KafkaSubscriber<String> subscriber = new KafkaSubscriber<>(new MessageCallback(),
            "topic", "groupId", false,
            KafkaSubscriber.OffsetReset.Earliest, 1, 1, 1, 5000, 5000,
            KafkaSubscriber.QueueType.OffsetBlocking, 1000);
    // Two messages on each of partitions 0 and 1 (offsets 1 and 2).
    KafkaTopicInfo message1 = new KafkaTopicInfo("topic", 0, 1, null);
    KafkaTopicInfo message2 = new KafkaTopicInfo("topic", 0, 2, null);
    KafkaTopicInfo message3 = new KafkaTopicInfo("topic", 1, 1, null);
    KafkaTopicInfo message4 = new KafkaTopicInfo("topic", 1, 2, null);
    subscriber.consume(message1);
    subscriber.consume(message2);
    subscriber.consume(message3);
    subscriber.consume(message4);
    KafkaConsumer realConsumer = mock(KafkaConsumer.class);
    // Matches any commit map whose (single) committed offset is 3 — i.e. the
    // commit that follows consuming up to offset 2.
    class ArgMatcher implements ArgumentMatcher<Map<TopicPartition, OffsetAndMetadata>> {
        @Override
        public boolean matches(Map<TopicPartition, OffsetAndMetadata> arg) {
            OffsetAndMetadata oam = arg.values().iterator().next();
            return oam.offset() == 3;
        }
    }
    // Simulate losing the partition assignment: the matching commit throws
    // CommitFailedException, as Kafka does after a group re-balance.
    doThrow(new CommitFailedException()).when(realConsumer).commitSync(argThat(new ArgMatcher()));
    subscriber.realConsumer = realConsumer;
    subscriber.offsetCommitter = new OffsetCommitter(realConsumer, Clock.systemUTC());
    // The subscriber should survive the failed commit without propagating it.
    subscriber.consumeMessages();
}
Example #5
Source File: BaseKafkaConsumer09.java From datacollector with Apache License 2.0 | 5 votes |
@Override
public void commit() {
    synchronized (pollCommitMutex) {
        // While rebalancing there is no point for us to commit offset since it's not allowed operation
        if (rebalanceInProgress.get()) {
            LOG.debug("Kafka is rebalancing, not commiting offsets");
            return;
        }
        // A previous commit failure requires poll() to run before we may commit again.
        if (needToCallPoll.get()) {
            LOG.debug("Waiting on poll to be properly called before continuing.");
            return;
        }
        try {
            if (topicPartitionToOffsetMetadataMap.isEmpty()) {
                LOG.debug("Skipping committing offsets since we haven't consume anything.");
                return;
            }
            LOG.debug("Committing offsets: {}", topicPartitionToOffsetMetadataMap.toString());
            kafkaConsumer.commitSync(topicPartitionToOffsetMetadataMap);
        } catch (CommitFailedException ex) {
            LOG.warn("Can't commit offset to Kafka: {}", ex.toString(), ex);
            // After CommitFailedException we MUST call consumer's poll() method first
            needToCallPoll.set(true);
            // The consumer thread might be stuck on writing to the queue, so we need to clean it up to unblock that thread
            recordQueue.clear();
        } finally {
            // either we've committed the offsets (so now we drop them so that we don't re-commit anything)
            // or CommitFailedException was thrown, in which case poll needs to be called again and they are invalid
            topicPartitionToOffsetMetadataMap.clear();
        }
    }
}
Example #6
Source File: SecorKafkaMessageIterator.java From secor with Apache License 2.0 | 5 votes |
/**
 * Synchronously commits {@code offset} for the given Secor topic partition.
 *
 * <p>A {@link CommitFailedException} is treated as best-effort and only traced:
 * it is the expected outcome when the consumer group re-balances mid-commit.
 */
@Override
public void commit(com.pinterest.secor.common.TopicPartition topicPartition, long offset) {
    // Translate Secor's partition type into Kafka's native TopicPartition.
    TopicPartition target =
            new TopicPartition(topicPartition.getTopic(), topicPartition.getPartition());
    try {
        LOG.info("committing {} offset {} to kafka", topicPartition, offset);
        mKafkaConsumer.commitSync(ImmutableMap.of(target, new OffsetAndMetadata(offset)));
    } catch (CommitFailedException e) {
        // Best-effort commit: a failure here simply means the group re-balanced.
        LOG.trace("kafka commit failed due to group re-balance", e);
    }
}
Example #7
Source File: KafkaExceptionMapperTest.java From rest-utils with Apache License 2.0 | 4 votes |
@Test
public void testKafkaExceptions() {
    // Exceptions explicitly mapped in KafkaExceptionMapper.
    verifyMapperResponse(new BrokerNotAvailableException("some message"), Status.SERVICE_UNAVAILABLE,
        BROKER_NOT_AVAILABLE_ERROR_CODE);
    verifyMapperResponse(new InvalidReplicationFactorException("some message"), Status.BAD_REQUEST,
        KAFKA_BAD_REQUEST_ERROR_CODE);
    verifyMapperResponse(new SecurityDisabledException("some message"), Status.BAD_REQUEST,
        KAFKA_BAD_REQUEST_ERROR_CODE);
    verifyMapperResponse(new UnsupportedVersionException("some message"), Status.BAD_REQUEST,
        KAFKA_BAD_REQUEST_ERROR_CODE);
    verifyMapperResponse(new InvalidPartitionsException("some message"), Status.BAD_REQUEST,
        KAFKA_BAD_REQUEST_ERROR_CODE);
    verifyMapperResponse(new InvalidRequestException("some message"), Status.BAD_REQUEST,
        KAFKA_BAD_REQUEST_ERROR_CODE);
    verifyMapperResponse(new UnknownServerException("some message"), Status.BAD_REQUEST,
        KAFKA_BAD_REQUEST_ERROR_CODE);
    verifyMapperResponse(new UnknownTopicOrPartitionException("some message"), Status.NOT_FOUND,
        KAFKA_UNKNOWN_TOPIC_PARTITION_CODE);
    verifyMapperResponse(new PolicyViolationException("some message"), Status.BAD_REQUEST,
        KAFKA_BAD_REQUEST_ERROR_CODE);
    verifyMapperResponse(new TopicExistsException("some message"), Status.BAD_REQUEST,
        KAFKA_BAD_REQUEST_ERROR_CODE);
    verifyMapperResponse(new InvalidConfigurationException("some message"), Status.BAD_REQUEST,
        KAFKA_BAD_REQUEST_ERROR_CODE);
    // Test a couple of retriable exceptions.
    verifyMapperResponse(new NotCoordinatorException("some message"), Status.INTERNAL_SERVER_ERROR,
        KAFKA_RETRIABLE_ERROR_ERROR_CODE);
    verifyMapperResponse(new NotEnoughReplicasException("some message"), Status.INTERNAL_SERVER_ERROR,
        KAFKA_RETRIABLE_ERROR_ERROR_CODE);
    // Test a couple of generic Kafka exceptions.
    verifyMapperResponse(new CommitFailedException(), Status.INTERNAL_SERVER_ERROR,
        KAFKA_ERROR_ERROR_CODE);
    verifyMapperResponse(new ConcurrentTransactionsException("some message"), Status.INTERNAL_SERVER_ERROR,
        KAFKA_ERROR_ERROR_CODE);
    // Test a few general (non-Kafka) exceptions.
    verifyMapperResponse(new NullPointerException("some message"), Status.INTERNAL_SERVER_ERROR,
        Status.INTERNAL_SERVER_ERROR.getStatusCode());
    verifyMapperResponse(new IllegalArgumentException("some message"), Status.INTERNAL_SERVER_ERROR,
        Status.INTERNAL_SERVER_ERROR.getStatusCode());
}