Java Code Examples for kafka.javaapi.OffsetResponse#errorCode()
The following examples show how to use kafka.javaapi.OffsetResponse#errorCode().
You can go to the original project or source file by following the links above each example.
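Before the examples, here is a minimal sketch of the pattern they all share: build an OffsetRequest, send it through a SimpleConsumer, check OffsetResponse#hasError(), and read the per-partition error with errorCode(topic, partition). This sketch is illustrative only and is not taken from any of the projects below; the consumer, topic, partition, and client name are assumed placeholders.

import java.util.Collections;
import java.util.Map;

import kafka.api.PartitionOffsetRequestInfo;
import kafka.common.TopicAndPartition;
import kafka.javaapi.OffsetRequest;
import kafka.javaapi.OffsetResponse;
import kafka.javaapi.consumer.SimpleConsumer;

public class OffsetErrorCodeSketch {

    /**
     * Returns the latest offset for the given topic partition, or -1 if the
     * broker reports an error for that partition. (Illustrative helper, not
     * part of any of the projects listed below.)
     */
    static long fetchLatestOffset(SimpleConsumer consumer, String topic, int partition, String clientName) {
        TopicAndPartition topicAndPartition = new TopicAndPartition(topic, partition);
        Map<TopicAndPartition, PartitionOffsetRequestInfo> requestInfo = Collections.singletonMap(
            topicAndPartition, new PartitionOffsetRequestInfo(kafka.api.OffsetRequest.LatestTime(), 1));
        OffsetRequest request = new OffsetRequest(requestInfo,
            kafka.api.OffsetRequest.CurrentVersion(), clientName);

        OffsetResponse response = consumer.getOffsetsBefore(request);
        if (response.hasError()) {
            // errorCode(topic, partition) returns the per-partition error as a short
            short errorCode = response.errorCode(topic, partition);
            System.err.println("Offset request failed for " + topic + "-" + partition
                + ", error code: " + errorCode);
            return -1L;
        }
        return response.offsets(topic, partition)[0];
    }
}

The examples below follow the same structure, differing mainly in how they react to a non-zero error code (throwing, retrying, or falling back to a default offset).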
Example 1
Source File: LegacyKafkaClient.java From secor with Apache License 2.0
private long findLastOffset(TopicPartition topicPartition, SimpleConsumer consumer) {
    TopicAndPartition topicAndPartition = new TopicAndPartition(topicPartition.getTopic(),
        topicPartition.getPartition());
    Map<TopicAndPartition, PartitionOffsetRequestInfo> requestInfo =
        new HashMap<TopicAndPartition, PartitionOffsetRequestInfo>();
    requestInfo.put(topicAndPartition,
        new PartitionOffsetRequestInfo(kafka.api.OffsetRequest.LatestTime(), 1));
    final String clientName = getClientName(topicPartition);
    OffsetRequest request = new OffsetRequest(requestInfo,
        kafka.api.OffsetRequest.CurrentVersion(), clientName);
    OffsetResponse response = consumer.getOffsetsBefore(request);
    if (response.hasError()) {
        throw new RuntimeException("Error fetching offset data. Reason: " +
            response.errorCode(topicPartition.getTopic(), topicPartition.getPartition()));
    }
    long[] offsets = response.offsets(topicPartition.getTopic(),
        topicPartition.getPartition());
    return offsets[0] - 1;
}
Example 2
Source File: SimpleKafkaConsumer.java From twill with Apache License 2.0
/**
 * Retrieves the last offset before the given timestamp for a given topic partition.
 *
 * @return The last offset before the given timestamp or {@code 0} if failed to do so.
 */
private long getLastOffset(TopicPartition topicPart, long timestamp) {
    BrokerInfo brokerInfo = brokerService.getLeader(topicPart.getTopic(), topicPart.getPartition());
    SimpleConsumer consumer = brokerInfo == null ? null : consumers.getUnchecked(brokerInfo);

    // If no broker, treat it as failure attempt.
    if (consumer == null) {
        LOG.warn("Failed to talk to any broker. Default offset to 0 for {}", topicPart);
        return 0L;
    }

    // Fire offset request
    OffsetRequest request = new OffsetRequest(ImmutableMap.of(
        new TopicAndPartition(topicPart.getTopic(), topicPart.getPartition()),
        new PartitionOffsetRequestInfo(timestamp, 1)
    ), kafka.api.OffsetRequest.CurrentVersion(), consumer.clientId());
    OffsetResponse response = consumer.getOffsetsBefore(request);

    // Retrieve offsets from response
    long[] offsets = response.hasError()
        ? null
        : response.offsets(topicPart.getTopic(), topicPart.getPartition());
    if (offsets == null || offsets.length <= 0) {
        short errorCode = response.errorCode(topicPart.getTopic(), topicPart.getPartition());

        // If the topic partition doesn't exists, use offset 0 without logging error.
        if (errorCode != ErrorMapping.UnknownTopicOrPartitionCode()) {
            consumers.refresh(brokerInfo);
            LOG.warn("Failed to fetch offset for {} with timestamp {}. Error: {}. Default offset to 0.",
                topicPart, timestamp, errorCode);
        }
        return 0L;
    }

    LOG.debug("Offset {} fetched for {} with timestamp {}.", offsets[0], topicPart, timestamp);
    return offsets[0];
}
Example 3
Source File: KafkaLatestOffsetFetcher.java From eagle with Apache License 2.0
public long getLatestOffset(SimpleConsumer consumer, String topic, int partition, String clientName) {
    TopicAndPartition topicAndPartition = new TopicAndPartition(topic, partition);
    Map<TopicAndPartition, kafka.api.PartitionOffsetRequestInfo> requestInfo = new HashMap<>();
    requestInfo.put(topicAndPartition,
        new PartitionOffsetRequestInfo(kafka.api.OffsetRequest.LatestTime(), 1));
    kafka.javaapi.OffsetRequest request = new kafka.javaapi.OffsetRequest(
        requestInfo, kafka.api.OffsetRequest.CurrentVersion(), clientName);
    OffsetResponse response = consumer.getOffsetsBefore(request);
    if (response.hasError()) {
        throw new RuntimeException("Error fetching data offset from the broker. Reason: "
            + response.errorCode(topic, partition));
    }
    long[] offsets = response.offsets(topic, partition);
    return offsets[0];
}
Example 4
Source File: KafkaSimpleConsumer.java From Pistachio with Apache License 2.0
public long getLastOffset() throws InterruptedException {
    OffsetResponse response = null;
    Broker previousLeader = leaderBroker;
    while (true) {
        TopicAndPartition topicAndPartition = new TopicAndPartition(topic, partitionId);
        Map<TopicAndPartition, PartitionOffsetRequestInfo> requestInfo =
            new HashMap<TopicAndPartition, PartitionOffsetRequestInfo>();
        requestInfo.put(topicAndPartition,
            new PartitionOffsetRequestInfo(kafka.api.OffsetRequest.LatestTime(), 1));
        kafka.javaapi.OffsetRequest request = new kafka.javaapi.OffsetRequest(
            requestInfo, kafka.api.OffsetRequest.CurrentVersion(), clientId);

        ensureConsumer(previousLeader);

        try {
            response = consumer.getOffsetsBefore(request);
        } catch (Exception e) {
            // e could be an instance of ClosedByInterruptException as SimpleConsumer.fetch uses nio
            if (Thread.interrupted()) {
                logger.info("catch exception of {} with interrupted in getLastOffset for {} - {}",
                    e.getClass().getName(), topic, partitionId);
                throw new InterruptedException();
            }
            logger.warn("caughte exception in getLastOffset {} - {}", topic, partitionId, e);
            response = null;
        }

        if (response == null || response.hasError()) {
            short errorCode = response != null
                ? response.errorCode(topic, partitionId)
                : ErrorMapping.UnknownCode();

            logger.warn("Error fetching data Offset for {} - {}, the Broker. Reason: {}",
                topic, partitionId, errorCode);

            stopConsumer();
            previousLeader = leaderBroker;
            leaderBroker = null;
            continue;
        }
        break;
    }
    long[] offsets = response.offsets(topic, partitionId);
    return offsets[offsets.length - 1];
}
Example 5
Source File: KafkaWrapper.java From incubator-gobblin with Apache License 2.0
private long getOffset(KafkaPartition partition,
    Map<TopicAndPartition, PartitionOffsetRequestInfo> offsetRequestInfo)
    throws KafkaOffsetRetrievalFailureException {
  SimpleConsumer consumer = this.getSimpleConsumer(partition.getLeader().getHostAndPort());
  for (int i = 0; i < this.fetchOffsetRetries; i++) {
    try {
      OffsetResponse offsetResponse = consumer.getOffsetsBefore(new OffsetRequest(offsetRequestInfo,
          kafka.api.OffsetRequest.CurrentVersion(), this.clientName));
      if (offsetResponse.hasError()) {
        throw new RuntimeException(
            "offsetReponse has error: " + offsetResponse.errorCode(partition.getTopicName(), partition.getId()));
      }
      return offsetResponse.offsets(partition.getTopicName(), partition.getId())[0];
    } catch (Exception e) {
      LOG.warn(
          String.format("Fetching offset for partition %s has failed %d time(s). Reason: %s", partition, i + 1, e));
      if (i < this.fetchOffsetRetries - 1) {
        try {
          Thread.sleep((long) ((i + Math.random()) * 1000));
        } catch (InterruptedException e2) {
          LOG.error("Caught interrupted exception between retries of getting latest offsets. " + e2);
        }
      }
    }
  }
  throw new KafkaOffsetRetrievalFailureException(
      String.format("Fetching offset for partition %s has failed.", partition));
}
Example 6
Source File: Kafka08ConsumerClient.java From incubator-gobblin with Apache License 2.0
private long getOffset(KafkaPartition partition,
    Map<TopicAndPartition, PartitionOffsetRequestInfo> offsetRequestInfo)
    throws KafkaOffsetRetrievalFailureException {
  SimpleConsumer consumer = this.getSimpleConsumer(partition.getLeader().getHostAndPort());
  for (int i = 0; i < this.fetchOffsetRetries; i++) {
    try {
      OffsetResponse offsetResponse = consumer.getOffsetsBefore(new OffsetRequest(offsetRequestInfo,
          kafka.api.OffsetRequest.CurrentVersion(), this.clientName));
      if (offsetResponse.hasError()) {
        throw new RuntimeException("offsetReponse has error: "
            + offsetResponse.errorCode(partition.getTopicName(), partition.getId()));
      }
      return offsetResponse.offsets(partition.getTopicName(), partition.getId())[0];
    } catch (Exception e) {
      log.warn(String.format("Fetching offset for partition %s has failed %d time(s). Reason: %s",
          partition, i + 1, e));
      if (i < this.fetchOffsetRetries - 1) {
        try {
          Thread.sleep((long) ((i + Math.random()) * 1000));
        } catch (InterruptedException e2) {
          log.error("Caught interrupted exception between retries of getting latest offsets. " + e2);
        }
      }
    }
  }
  throw new KafkaOffsetRetrievalFailureException(
      String.format("Fetching offset for partition %s has failed.", partition));
}
Example 7
Source File: KafkaStreamMetadataProvider.java From incubator-pinot with Apache License 2.0
/**
 * Fetches the numeric Kafka offset for this partition for a symbolic name ("largest" or "smallest").
 *
 * @param offsetCriteria
 * @param timeoutMillis Timeout in milliseconds
 * @throws java.util.concurrent.TimeoutException If the operation could not be completed within
 *         {@code timeoutMillis} milliseconds
 * @return An offset
 */
@Override
public synchronized StreamPartitionMsgOffset fetchStreamPartitionOffset(@Nonnull OffsetCriteria offsetCriteria,
    long timeoutMillis)
    throws java.util.concurrent.TimeoutException {
  Preconditions.checkState(isPartitionProvided,
      "Cannot fetch partition offset. StreamMetadataProvider created without partition information");
  Preconditions.checkNotNull(offsetCriteria);
  final long offsetRequestTime;
  if (offsetCriteria.isLargest()) {
    offsetRequestTime = kafka.api.OffsetRequest.LatestTime();
  } else if (offsetCriteria.isSmallest()) {
    offsetRequestTime = kafka.api.OffsetRequest.EarliestTime();
  } else {
    throw new IllegalArgumentException("Unknown initial offset value " + offsetCriteria.toString());
  }

  int kafkaErrorCount = 0;
  final int MAX_KAFKA_ERROR_COUNT = 10;

  final long endTime = System.currentTimeMillis() + timeoutMillis;

  while (System.currentTimeMillis() < endTime) {
    // Try to get into a state where we're connected to Kafka
    while (_currentState.getStateValue() != KafkaConnectionHandler.ConsumerState.CONNECTED_TO_PARTITION_LEADER
        && System.currentTimeMillis() < endTime) {
      _currentState.process();
    }

    if (_currentState.getStateValue() != KafkaConnectionHandler.ConsumerState.CONNECTED_TO_PARTITION_LEADER
        && endTime <= System.currentTimeMillis()) {
      throw new TimeoutException();
    }

    // Send the offset request to Kafka
    OffsetRequest request = new OffsetRequest(Collections.singletonMap(new TopicAndPartition(_topic, _partition),
        new PartitionOffsetRequestInfo(offsetRequestTime, 1)), kafka.api.OffsetRequest.CurrentVersion(), _clientId);

    OffsetResponse offsetResponse;
    try {
      offsetResponse = _simpleConsumer.getOffsetsBefore(request);
    } catch (Exception e) {
      _currentState.handleConsumerException(e);
      continue;
    }

    final short errorCode = offsetResponse.errorCode(_topic, _partition);

    if (errorCode == Errors.NONE.code()) {
      long offset = offsetResponse.offsets(_topic, _partition)[0];
      if (offset == 0L) {
        LOGGER.warn("Fetched offset of 0 for topic {} and partition {}, is this a newly created topic?",
            _topic, _partition);
      }
      return new LongMsgOffset(offset);
    } else if (errorCode == Errors.LEADER_NOT_AVAILABLE.code()) {
      // If there is no leader, it'll take some time for a new leader to be elected, wait 100 ms before retrying
      Uninterruptibles.sleepUninterruptibly(100, TimeUnit.MILLISECONDS);
    } else {
      // Retry after a short delay
      kafkaErrorCount++;

      if (MAX_KAFKA_ERROR_COUNT < kafkaErrorCount) {
        throw exceptionForKafkaErrorCode(errorCode);
      }

      Uninterruptibles.sleepUninterruptibly(100, TimeUnit.MILLISECONDS);
    }
  }

  throw new TimeoutException();
}