Java Code Examples for kafka.javaapi.TopicMetadata#errorCode()
The following examples show how to use
kafka.javaapi.TopicMetadata#errorCode() .
You can vote up the examples you find useful or vote down those you don't, and follow the links above each example to reach the original project or source file. Related API usage is shown in the sidebar.
Example 1
Source File: KafkaValidationUtil08.java From datacollector with Apache License 2.0 | 5 votes |
@Override public int getPartitionCount( String metadataBrokerList, String topic, Map<String, Object> kafkaClientConfigs, int messageSendMaxRetries, long retryBackoffMs ) throws StageException { List<HostAndPort> kafkaBrokers = getKafkaBrokers(metadataBrokerList); TopicMetadata topicMetadata; try { topicMetadata = KafkaValidationUtil08.getTopicMetadata( kafkaBrokers, topic, messageSendMaxRetries, retryBackoffMs ); if (topicMetadata == null) { // Could not get topic metadata from any of the supplied brokers throw new StageException(KafkaErrors.KAFKA_03, topic, metadataBrokerList); } if (topicMetadata.errorCode() == ErrorMapping.UnknownTopicOrPartitionCode()) { // Topic does not exist throw new StageException(KafkaErrors.KAFKA_04, topic); } if (topicMetadata.errorCode() != 0) { // Topic metadata returned error code other than ErrorMapping.UnknownTopicOrPartitionCode() throw new StageException(KafkaErrors.KAFKA_03, topic, metadataBrokerList); } } catch (IOException e) { LOG.error(KafkaErrors.KAFKA_11.getMessage(), topic, kafkaBrokers, e.toString(), e); throw new StageException(KafkaErrors.KAFKA_11, topic, kafkaBrokers, e.toString()); } return topicMetadata.partitionsMetadata().size(); }
Example 2
Source File: Kafka08PartitionDiscoverer.java From Flink-CEPplus with Apache License 2.0 | 4 votes |
@Override protected List<String> getAllTopics() { List<String> topics = new LinkedList<>(); retryLoop: for (int retry = 0; retry < numRetries; retry++) { brokersLoop: for (int arrIdx = 0; arrIdx < seedBrokerAddresses.length; arrIdx++) { LOG.info("Trying to get topic metadata from broker {} in try {}/{}", seedBrokerAddresses[currentContactSeedBrokerIndex], retry, numRetries); try { // clear in case we have an incomplete list from previous tries topics.clear(); for (TopicMetadata item : consumer.send(new TopicMetadataRequest(Collections.<String>emptyList())).topicsMetadata()) { if (item.errorCode() != ErrorMapping.NoError()) { // warn and try more brokers LOG.warn("Error while getting metadata from broker {} to find partitions for {}. Error: {}.", seedBrokerAddresses[currentContactSeedBrokerIndex], topics.toString(), ErrorMapping.exceptionFor(item.errorCode()).getMessage()); useNextAddressAsNewContactSeedBroker(); continue brokersLoop; } topics.add(item.topic()); } break retryLoop; // leave the loop through the brokers } catch (Exception e) { //validates seed brokers in case of a ClosedChannelException validateSeedBrokers(seedBrokerAddresses, e); LOG.warn("Error communicating with broker {} to find partitions for {}. {} Message: {}", seedBrokerAddresses[currentContactSeedBrokerIndex], topics, e.getClass().getName(), e.getMessage()); LOG.debug("Detailed trace", e); // we sleep a bit. Retrying immediately doesn't make sense in cases where Kafka is reorganizing the leader metadata try { Thread.sleep(500); } catch (InterruptedException e1) { // sleep shorter. } useNextAddressAsNewContactSeedBroker(); } } // brokers loop } // retries loop return topics; }
Example 3
Source File: Kafka08PartitionDiscoverer.java From Flink-CEPplus with Apache License 2.0 | 4 votes |
/** * Send request to Kafka to get partitions for topics. * * @param topics The name of the topics. */ public List<KafkaTopicPartitionLeader> getPartitionLeadersForTopics(List<String> topics) { List<KafkaTopicPartitionLeader> partitions = new LinkedList<>(); retryLoop: for (int retry = 0; retry < numRetries; retry++) { brokersLoop: for (int arrIdx = 0; arrIdx < seedBrokerAddresses.length; arrIdx++) { LOG.info("Trying to get topic metadata from broker {} in try {}/{}", seedBrokerAddresses[currentContactSeedBrokerIndex], retry, numRetries); try { // clear in case we have an incomplete list from previous tries partitions.clear(); for (TopicMetadata item : consumer.send(new TopicMetadataRequest(topics)).topicsMetadata()) { if (item.errorCode() != ErrorMapping.NoError()) { // warn and try more brokers LOG.warn("Error while getting metadata from broker {} to find partitions for {}. Error: {}.", seedBrokerAddresses[currentContactSeedBrokerIndex], topics.toString(), ErrorMapping.exceptionFor(item.errorCode()).getMessage()); useNextAddressAsNewContactSeedBroker(); continue brokersLoop; } if (!topics.contains(item.topic())) { LOG.warn("Received metadata from topic " + item.topic() + " even though it was not requested. Skipping ..."); useNextAddressAsNewContactSeedBroker(); continue brokersLoop; } for (PartitionMetadata part : item.partitionsMetadata()) { Node leader = brokerToNode(part.leader()); KafkaTopicPartition ktp = new KafkaTopicPartition(item.topic(), part.partitionId()); KafkaTopicPartitionLeader pInfo = new KafkaTopicPartitionLeader(ktp, leader); partitions.add(pInfo); } } break retryLoop; // leave the loop through the brokers } catch (Exception e) { //validates seed brokers in case of a ClosedChannelException validateSeedBrokers(seedBrokerAddresses, e); LOG.warn("Error communicating with broker {} to find partitions for {}. 
{} Message: {}", seedBrokerAddresses[currentContactSeedBrokerIndex], topics, e.getClass().getName(), e.getMessage()); LOG.debug("Detailed trace", e); // we sleep a bit. Retrying immediately doesn't make sense in cases where Kafka is reorganizing the leader metadata try { Thread.sleep(500); } catch (InterruptedException e1) { // sleep shorter. } useNextAddressAsNewContactSeedBroker(); } } // brokers loop } // retries loop return partitions; }
Example 4
Source File: Kafka08PartitionDiscoverer.java From flink with Apache License 2.0 | 4 votes |
@Override protected List<String> getAllTopics() { List<String> topics = new LinkedList<>(); retryLoop: for (int retry = 0; retry < numRetries; retry++) { brokersLoop: for (int arrIdx = 0; arrIdx < seedBrokerAddresses.length; arrIdx++) { LOG.info("Trying to get topic metadata from broker {} in try {}/{}", seedBrokerAddresses[currentContactSeedBrokerIndex], retry, numRetries); try { // clear in case we have an incomplete list from previous tries topics.clear(); for (TopicMetadata item : consumer.send(new TopicMetadataRequest(Collections.<String>emptyList())).topicsMetadata()) { if (item.errorCode() != ErrorMapping.NoError()) { // warn and try more brokers LOG.warn("Error while getting metadata from broker {} to find partitions for {}. Error: {}.", seedBrokerAddresses[currentContactSeedBrokerIndex], topics.toString(), ErrorMapping.exceptionFor(item.errorCode()).getMessage()); useNextAddressAsNewContactSeedBroker(); continue brokersLoop; } topics.add(item.topic()); } break retryLoop; // leave the loop through the brokers } catch (Exception e) { //validates seed brokers in case of a ClosedChannelException validateSeedBrokers(seedBrokerAddresses, e); LOG.warn("Error communicating with broker {} to find partitions for {}. {} Message: {}", seedBrokerAddresses[currentContactSeedBrokerIndex], topics, e.getClass().getName(), e.getMessage()); LOG.debug("Detailed trace", e); // we sleep a bit. Retrying immediately doesn't make sense in cases where Kafka is reorganizing the leader metadata try { Thread.sleep(500); } catch (InterruptedException e1) { // sleep shorter. } useNextAddressAsNewContactSeedBroker(); } } // brokers loop } // retries loop return topics; }
Example 5
Source File: Kafka08PartitionDiscoverer.java From flink with Apache License 2.0 | 4 votes |
/** * Send request to Kafka to get partitions for topics. * * @param topics The name of the topics. */ public List<KafkaTopicPartitionLeader> getPartitionLeadersForTopics(List<String> topics) { List<KafkaTopicPartitionLeader> partitions = new LinkedList<>(); retryLoop: for (int retry = 0; retry < numRetries; retry++) { brokersLoop: for (int arrIdx = 0; arrIdx < seedBrokerAddresses.length; arrIdx++) { LOG.info("Trying to get topic metadata from broker {} in try {}/{}", seedBrokerAddresses[currentContactSeedBrokerIndex], retry, numRetries); try { // clear in case we have an incomplete list from previous tries partitions.clear(); for (TopicMetadata item : consumer.send(new TopicMetadataRequest(topics)).topicsMetadata()) { if (item.errorCode() != ErrorMapping.NoError()) { // warn and try more brokers LOG.warn("Error while getting metadata from broker {} to find partitions for {}. Error: {}.", seedBrokerAddresses[currentContactSeedBrokerIndex], topics.toString(), ErrorMapping.exceptionFor(item.errorCode()).getMessage()); useNextAddressAsNewContactSeedBroker(); continue brokersLoop; } if (!topics.contains(item.topic())) { LOG.warn("Received metadata from topic " + item.topic() + " even though it was not requested. Skipping ..."); useNextAddressAsNewContactSeedBroker(); continue brokersLoop; } for (PartitionMetadata part : item.partitionsMetadata()) { Node leader = brokerToNode(part.leader()); KafkaTopicPartition ktp = new KafkaTopicPartition(item.topic(), part.partitionId()); KafkaTopicPartitionLeader pInfo = new KafkaTopicPartitionLeader(ktp, leader); partitions.add(pInfo); } } break retryLoop; // leave the loop through the brokers } catch (Exception e) { //validates seed brokers in case of a ClosedChannelException validateSeedBrokers(seedBrokerAddresses, e); LOG.warn("Error communicating with broker {} to find partitions for {}. 
{} Message: {}", seedBrokerAddresses[currentContactSeedBrokerIndex], topics, e.getClass().getName(), e.getMessage()); LOG.debug("Detailed trace", e); // we sleep a bit. Retrying immediately doesn't make sense in cases where Kafka is reorganizing the leader metadata try { Thread.sleep(500); } catch (InterruptedException e1) { // sleep shorter. } useNextAddressAsNewContactSeedBroker(); } } // brokers loop } // retries loop return partitions; }
Example 6
Source File: KafkaValidationUtil08.java From datacollector with Apache License 2.0 | 4 votes |
@Override public boolean validateTopicExistence( Stage.Context context, String groupName, String configName, List<HostAndPort> kafkaBrokers, String metadataBrokerList, String topic, Map<String, Object> kafkaClientConfigs, List<Stage.ConfigIssue> issues, boolean producer ) { boolean valid = true; if(topic == null || topic.isEmpty()) { issues.add(context.createConfigIssue(groupName, configName, KafkaErrors.KAFKA_05)); valid = false; } else { TopicMetadata topicMetadata; try { topicMetadata = KafkaValidationUtil08.getTopicMetadata(kafkaBrokers, topic, 1, 0); if(topicMetadata == null) { //Could not get topic metadata from any of the supplied brokers issues.add( context.createConfigIssue( groupName, KAFKA_CONFIG_BEAN_PREFIX + "topic", KafkaErrors.KAFKA_03, topic, metadataBrokerList ) ); valid = false; } else if (topicMetadata.errorCode() == ErrorMapping.UnknownTopicOrPartitionCode()) { //Topic does not exist issues.add( context.createConfigIssue( groupName, KAFKA_CONFIG_BEAN_PREFIX + "topic", KafkaErrors.KAFKA_04, topic ) ); valid = false; } else if (topicMetadata.errorCode() != 0) { // Topic metadata returned error code other than ErrorMapping.UnknownTopicOrPartitionCode() issues.add( context.createConfigIssue( groupName, KAFKA_CONFIG_BEAN_PREFIX + "topic", KafkaErrors.KAFKA_03, topic, metadataBrokerList ) ); valid = false; } } catch (IOException e) { //Could not connect to kafka with the given metadata broker list issues.add( context.createConfigIssue( groupName, KAFKA_CONFIG_BEAN_PREFIX + "metadataBrokerList", KafkaErrors.KAFKA_67, metadataBrokerList ) ); valid = false; } } return valid; }
Example 7
Source File: KafkaStreamMetadataProvider.java From incubator-pinot with Apache License 2.0 | 4 votes |
/** * Fetches the number of partitions for this kafka stream * @param timeoutMillis * @return */ @Override public synchronized int fetchPartitionCount(long timeoutMillis) { int unknownTopicReplyCount = 0; final int MAX_UNKNOWN_TOPIC_REPLY_COUNT = 10; int kafkaErrorCount = 0; final int MAX_KAFKA_ERROR_COUNT = 10; final long endTime = System.currentTimeMillis() + timeoutMillis; while (System.currentTimeMillis() < endTime) { // Try to get into a state where we're connected to Kafka while (!_currentState.isConnectedToKafkaBroker() && System.currentTimeMillis() < endTime) { _currentState.process(); } if (endTime <= System.currentTimeMillis() && !_currentState.isConnectedToKafkaBroker()) { throw new TimeoutException( "Failed to get the partition count for topic " + _topic + " within " + timeoutMillis + " ms"); } // Send the metadata request to Kafka TopicMetadataResponse topicMetadataResponse = null; try { topicMetadataResponse = _simpleConsumer.send(new TopicMetadataRequest(Collections.singletonList(_topic))); } catch (Exception e) { _currentState.handleConsumerException(e); continue; } final TopicMetadata topicMetadata = topicMetadataResponse.topicsMetadata().get(0); final short errorCode = topicMetadata.errorCode(); if (errorCode == Errors.NONE.code()) { return topicMetadata.partitionsMetadata().size(); } else if (errorCode == Errors.LEADER_NOT_AVAILABLE.code()) { // If there is no leader, it'll take some time for a new leader to be elected, wait 100 ms before retrying Uninterruptibles.sleepUninterruptibly(100, TimeUnit.MILLISECONDS); } else if (errorCode == Errors.INVALID_TOPIC_EXCEPTION.code()) { throw new RuntimeException("Invalid topic name " + _topic); } else if (errorCode == Errors.UNKNOWN_TOPIC_OR_PARTITION.code()) { if (MAX_UNKNOWN_TOPIC_REPLY_COUNT < unknownTopicReplyCount) { throw new RuntimeException("Topic " + _topic + " does not exist"); } else { // Kafka topic creation can sometimes take some time, so we'll retry after a little bit 
unknownTopicReplyCount++; Uninterruptibles.sleepUninterruptibly(100, TimeUnit.MILLISECONDS); } } else { // Retry after a short delay kafkaErrorCount++; if (MAX_KAFKA_ERROR_COUNT < kafkaErrorCount) { throw exceptionForKafkaErrorCode(errorCode); } Uninterruptibles.sleepUninterruptibly(100, TimeUnit.MILLISECONDS); } } throw new TimeoutException(); }