Java Code Examples for kafka.common.ErrorMapping#NoError
The following examples show how to use kafka.common.ErrorMapping#NoError, the error code in Kafka's legacy (0.8-era) Scala client that signals a request completed successfully.
You can go to the original project or source file by following the links above each example.
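All of the examples share one pattern: issue a request with the legacy client, read the short error code from the response, and compare it against ErrorMapping.NoError() (which is 0) before trusting the payload. Below is a minimal, self-contained sketch of that pattern using a SimpleConsumer fetch; the broker address, topic name, partition, and offset are placeholder assumptions, not values taken from the examples.

import kafka.api.FetchRequestBuilder;
import kafka.common.ErrorMapping;
import kafka.javaapi.FetchResponse;
import kafka.javaapi.consumer.SimpleConsumer;

public class NoErrorCheckSketch {
    public static void main(String[] args) {
        // Placeholder connection settings -- adjust for a real broker.
        SimpleConsumer consumer =
                new SimpleConsumer("localhost", 9092, 100000, 64 * 1024, "noErrorSketch");
        try {
            FetchResponse response = consumer.fetch(new FetchRequestBuilder()
                    .clientId("noErrorSketch")
                    .addFetch("my-topic", 0, 0L, 64 * 1024) // topic, partition, offset, fetchSize
                    .build());

            short errorCode = response.errorCode("my-topic", 0);
            if (errorCode != ErrorMapping.NoError()) {
                // Map the broker's numeric error code to its exception type and throw it.
                ErrorMapping.maybeThrowException(errorCode);
            }
            // errorCode == NoError: safe to read response.messageSet("my-topic", 0) here.
        } finally {
            consumer.close();
        }
    }
}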
Example 1
Source File: KafkaRecordReader.java From kangaroo with Apache License 2.0
/**
 * THIS METHOD HAS SIDE EFFECTS - it will update {@code currentMessageItr} (if necessary) and then return true iff
 * the iterator still has elements to be read. If you call {@link scala.collection.Iterator#next()} when this method
 * returns false, you risk a {@link NullPointerException} OR a no-more-elements exception.
 *
 * @return true if you can call {@link scala.collection.Iterator#next()} on {@code currentMessageItr}.
 */
@VisibleForTesting
boolean continueItr() {
    final long remaining = end - currentOffset;
    if (!canCallNext() && remaining > 0) {
        final int theFetchSize = (fetchSize > remaining) ? (int) remaining : fetchSize;
        LOG.debug(String.format("%s fetching %d bytes starting at offset %d", split.toString(), theFetchSize,
                currentOffset));
        final FetchRequest request = new FetchRequest(split.getPartition().getTopic(), split.getPartition()
                .getPartId(), currentOffset, theFetchSize);
        final ByteBufferMessageSet msg = consumer.fetch(request);
        final int errorCode = msg.getErrorCode();
        if (errorCode == ErrorMapping.OffsetOutOfRangeCode()) {
            return false;
        }
        if (errorCode != ErrorMapping.NoError()) {
            ErrorMapping.maybeThrowException(errorCode);
        } // --> else we try to grab the next iterator
        currentMessageItr = msg.iterator();
        currentOffset += msg.validBytes();
    }
    return canCallNext();
}
Example 2
Source File: CuratorKafkaMonitor.java From Kafdrop with Apache License 2.0
private Integer offsetManagerBroker(BlockingChannel channel, String groupId) {
    final ConsumerMetadataRequest request =
            new ConsumerMetadataRequest(groupId, (short) 0, 0, clientId());

    LOG.debug("Sending consumer metadata request: {}", request);

    channel.send(request);
    ConsumerMetadataResponse response =
            ConsumerMetadataResponse.readFrom(channel.receive().buffer());

    LOG.debug("Received consumer metadata response: {}", response);

    return (response.errorCode() == ErrorMapping.NoError()) ? response.coordinator().id() : null;
}
Example 3
Source File: Kafka08PartitionDiscoverer.java From Flink-CEPplus with Apache License 2.0 (the same code appears verbatim in the flink project)
@Override
protected List<String> getAllTopics() {
    List<String> topics = new LinkedList<>();

    retryLoop: for (int retry = 0; retry < numRetries; retry++) {
        brokersLoop: for (int arrIdx = 0; arrIdx < seedBrokerAddresses.length; arrIdx++) {
            LOG.info("Trying to get topic metadata from broker {} in try {}/{}",
                seedBrokerAddresses[currentContactSeedBrokerIndex], retry, numRetries);

            try {
                // clear in case we have an incomplete list from previous tries
                topics.clear();

                for (TopicMetadata item : consumer.send(new TopicMetadataRequest(Collections.<String>emptyList())).topicsMetadata()) {
                    if (item.errorCode() != ErrorMapping.NoError()) {
                        // warn and try more brokers
                        LOG.warn("Error while getting metadata from broker {} to find partitions for {}. Error: {}.",
                            seedBrokerAddresses[currentContactSeedBrokerIndex], topics.toString(),
                            ErrorMapping.exceptionFor(item.errorCode()).getMessage());

                        useNextAddressAsNewContactSeedBroker();
                        continue brokersLoop;
                    }

                    topics.add(item.topic());
                }
                break retryLoop; // leave the loop through the brokers
            }
            catch (Exception e) {
                // validates seed brokers in case of a ClosedChannelException
                validateSeedBrokers(seedBrokerAddresses, e);

                LOG.warn("Error communicating with broker {} to find partitions for {}. {} Message: {}",
                    seedBrokerAddresses[currentContactSeedBrokerIndex], topics,
                    e.getClass().getName(), e.getMessage());
                LOG.debug("Detailed trace", e);

                // we sleep a bit. Retrying immediately doesn't make sense in cases where Kafka is reorganizing the leader metadata
                try {
                    Thread.sleep(500);
                } catch (InterruptedException e1) {
                    // sleep shorter.
                }

                useNextAddressAsNewContactSeedBroker();
            }
        } // brokers loop
    } // retries loop

    return topics;
}
Example 4
Source File: Kafka08PartitionDiscoverer.java From Flink-CEPplus with Apache License 2.0 (the same code appears verbatim in the flink project)
/**
 * Send a request to Kafka to get the partitions for the given topics.
 *
 * @param topics The names of the topics.
 */
public List<KafkaTopicPartitionLeader> getPartitionLeadersForTopics(List<String> topics) {
    List<KafkaTopicPartitionLeader> partitions = new LinkedList<>();

    retryLoop: for (int retry = 0; retry < numRetries; retry++) {
        brokersLoop: for (int arrIdx = 0; arrIdx < seedBrokerAddresses.length; arrIdx++) {
            LOG.info("Trying to get topic metadata from broker {} in try {}/{}",
                seedBrokerAddresses[currentContactSeedBrokerIndex], retry, numRetries);

            try {
                // clear in case we have an incomplete list from previous tries
                partitions.clear();

                for (TopicMetadata item : consumer.send(new TopicMetadataRequest(topics)).topicsMetadata()) {
                    if (item.errorCode() != ErrorMapping.NoError()) {
                        // warn and try more brokers
                        LOG.warn("Error while getting metadata from broker {} to find partitions for {}. Error: {}.",
                            seedBrokerAddresses[currentContactSeedBrokerIndex], topics.toString(),
                            ErrorMapping.exceptionFor(item.errorCode()).getMessage());

                        useNextAddressAsNewContactSeedBroker();
                        continue brokersLoop;
                    }

                    if (!topics.contains(item.topic())) {
                        LOG.warn("Received metadata from topic " + item.topic() + " even though it was not requested. Skipping ...");

                        useNextAddressAsNewContactSeedBroker();
                        continue brokersLoop;
                    }

                    for (PartitionMetadata part : item.partitionsMetadata()) {
                        Node leader = brokerToNode(part.leader());
                        KafkaTopicPartition ktp = new KafkaTopicPartition(item.topic(), part.partitionId());
                        KafkaTopicPartitionLeader pInfo = new KafkaTopicPartitionLeader(ktp, leader);
                        partitions.add(pInfo);
                    }
                }
                break retryLoop; // leave the loop through the brokers
            }
            catch (Exception e) {
                // validates seed brokers in case of a ClosedChannelException
                validateSeedBrokers(seedBrokerAddresses, e);

                LOG.warn("Error communicating with broker {} to find partitions for {}. {} Message: {}",
                    seedBrokerAddresses[currentContactSeedBrokerIndex], topics,
                    e.getClass().getName(), e.getMessage());
                LOG.debug("Detailed trace", e);

                // we sleep a bit. Retrying immediately doesn't make sense in cases where Kafka is reorganizing the leader metadata
                try {
                    Thread.sleep(500);
                } catch (InterruptedException e1) {
                    // sleep shorter.
                }

                useNextAddressAsNewContactSeedBroker();
            }
        } // brokers loop
    } // retries loop

    return partitions;
}
Example 5
Source File: PulsarOffsetResponse.java From pulsar with Apache License 2.0
@Override
public short errorCode(String topic, int partition) {
    return ErrorMapping.NoError();
}