kafka.javaapi.TopicMetadataResponse Java Examples
The following examples show how to use kafka.javaapi.TopicMetadataResponse. Each example is taken from an open-source project; its source file and license are noted above the code.
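Before the project-specific examples, here is a minimal sketch of the pattern they all follow: open a kafka.javaapi.consumer.SimpleConsumer, send a TopicMetadataRequest for one or more topics, and read topic and partition metadata from the returned TopicMetadataResponse. The broker host and port, socket timeout, buffer size, client id, and topic name below are placeholder assumptions, not values from any of the projects that follow.

import java.util.Collections;

import kafka.javaapi.PartitionMetadata;
import kafka.javaapi.TopicMetadata;
import kafka.javaapi.TopicMetadataRequest;
import kafka.javaapi.TopicMetadataResponse;
import kafka.javaapi.consumer.SimpleConsumer;

public class TopicMetadataSketch {
    public static void main(String[] args) {
        // Placeholder broker host/port, socket timeout, buffer size and client id.
        SimpleConsumer consumer =
                new SimpleConsumer("localhost", 9092, 10000, 64 * 1024, "metadataLookup");
        try {
            // Ask the broker for metadata about a single (placeholder) topic.
            TopicMetadataRequest request =
                    new TopicMetadataRequest(Collections.singletonList("my-topic"));
            TopicMetadataResponse response = consumer.send(request);

            // Walk the response: one TopicMetadata per requested topic,
            // each with a PartitionMetadata entry per partition.
            for (TopicMetadata topicMetadata : response.topicsMetadata()) {
                System.out.println("topic=" + topicMetadata.topic()
                        + " partitions=" + topicMetadata.partitionsMetadata().size());
                for (PartitionMetadata partitionMetadata : topicMetadata.partitionsMetadata()) {
                    System.out.println("  partition " + partitionMetadata.partitionId()
                            + " leader=" + partitionMetadata.leader());
                }
            }
        } finally {
            consumer.close();
        }
    }
}

The examples below wrap this same request/response exchange in retries, leader lookups, partition counting, and test doubles.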
Example #1
Source File: LegacyKafkaClient.java From secor with Apache License 2.0
@Override
public int getNumPartitions(String topic) {
    SimpleConsumer consumer = null;
    try {
        consumer = createConsumer(
            mConfig.getKafkaSeedBrokerHost(),
            mConfig.getKafkaSeedBrokerPort(),
            "partitionLookup");
        List<String> topics = new ArrayList<String>();
        topics.add(topic);
        TopicMetadataRequest request = new TopicMetadataRequest(topics);
        TopicMetadataResponse response = consumer.send(request);
        if (response.topicsMetadata().size() != 1) {
            throw new RuntimeException("Expected one metadata for topic " + topic +
                " found " + response.topicsMetadata().size());
        }
        TopicMetadata topicMetadata = response.topicsMetadata().get(0);
        return topicMetadata.partitionsMetadata().size();
    } finally {
        if (consumer != null) {
            consumer.close();
        }
    }
}
Example #2
Source File: KafkaTool.java From Scribengin with GNU Affero General Public License v3.0
public TopicMetadata findTopicMetadata(final String topic, int retries) throws Exception {
    Operation<TopicMetadata> findTopicOperation = new Operation<TopicMetadata>() {
        @Override
        public TopicMetadata execute() throws Exception {
            List<String> topics = Collections.singletonList(topic);
            TopicMetadataRequest req = new TopicMetadataRequest(topics);
            TopicMetadataResponse resp = consumer.send(req);
            List<TopicMetadata> topicMetadatas = resp.topicsMetadata();
            if (topicMetadatas.size() != 1) {
                throw new Exception("Expect to find 1 topic " + topic + ", but found " +
                    topicMetadatas.size());
            }
            return topicMetadatas.get(0);
        }
    };
    return execute(findTopicOperation, retries);
}
Example #3
Source File: KafkaUtils.java From kafka-monitor with Apache License 2.0
public TopicMetadataResponse topicMetadataRequest(BlockingChannel channel, String[] topics) {
    TopicMetadataRequest request =
        new TopicMetadataRequest((short) 0, 0, "kafkaMonitor", Arrays.asList(topics));
    channel.send(request);
    // Read the underlying Scala response from the channel, then wrap it in the javaapi wrapper.
    final kafka.api.TopicMetadataResponse underlyingResponse =
        kafka.api.TopicMetadataResponse.readFrom(channel.receive().payload());
    TopicMetadataResponse response = new TopicMetadataResponse(underlyingResponse);
    return response;
}
Example #4
Source File: LegacyKafkaClient.java From secor with Apache License 2.0
private HostAndPort findLeader(TopicPartition topicPartition) {
    SimpleConsumer consumer = null;
    try {
        LOG.debug("looking up leader for topic {} partition {}",
            topicPartition.getTopic(), topicPartition.getPartition());
        consumer = createConsumer(
            mConfig.getKafkaSeedBrokerHost(),
            mConfig.getKafkaSeedBrokerPort(),
            "leaderLookup");
        List<String> topics = new ArrayList<String>();
        topics.add(topicPartition.getTopic());
        TopicMetadataRequest request = new TopicMetadataRequest(topics);
        TopicMetadataResponse response = consumer.send(request);
        List<TopicMetadata> metaData = response.topicsMetadata();
        for (TopicMetadata item : metaData) {
            for (PartitionMetadata part : item.partitionsMetadata()) {
                if (part.partitionId() == topicPartition.getPartition()) {
                    return HostAndPort.fromParts(part.leader().host(), part.leader().port());
                }
            }
        }
    } finally {
        if (consumer != null) {
            consumer.close();
        }
    }
    return null;
}
Example #5
Source File: KafkaTopicService.java From Decision with Apache License 2.0
@Override
public Integer getNumPartitionsForTopic(String topic) {
    TopicMetadataRequest topicRequest = new TopicMetadataRequest(Arrays.asList(topic));
    TopicMetadataResponse topicResponse = simpleConsumer.send(topicRequest);
    for (TopicMetadata topicMetadata : topicResponse.topicsMetadata()) {
        if (topic.equals(topicMetadata.topic())) {
            int partitionSize = topicMetadata.partitionsMetadata().size();
            logger.debug("Partition size found ({}) for {} topic", partitionSize, topic);
            return partitionSize;
        }
    }
    logger.warn("Metadata info not found!. TOPIC {}", topic);
    return null;
}
Example #6
Source File: KafkaPartitionLevelConsumerTest.java From incubator-pinot with Apache License 2.0
@Override
public TopicMetadataResponse send(TopicMetadataRequest request) {
    java.util.List<String> topics = request.topics();
    TopicMetadata[] topicMetadataArray = new TopicMetadata[topics.size()];

    for (int i = 0; i < topicMetadataArray.length; i++) {
        String topic = topics.get(i);
        if (!topic.equals(topicName)) {
            topicMetadataArray[i] = new TopicMetadata(topic, null, Errors.UNKNOWN_TOPIC_OR_PARTITION.code());
        } else {
            PartitionMetadata[] partitionMetadataArray = new PartitionMetadata[partitionCount];
            for (int j = 0; j < partitionCount; j++) {
                java.util.List<BrokerEndPoint> emptyJavaList = Collections.emptyList();
                List<BrokerEndPoint> emptyScalaList = JavaConversions.asScalaBuffer(emptyJavaList).toList();
                partitionMetadataArray[j] = new PartitionMetadata(j, Some.apply(brokerArray[partitionLeaderIndices[j]]),
                    emptyScalaList, emptyScalaList, Errors.NONE.code());
            }
            Seq<PartitionMetadata> partitionsMetadata = List.fromArray(partitionMetadataArray);
            topicMetadataArray[i] = new TopicMetadata(topic, partitionsMetadata, Errors.NONE.code());
        }
    }

    Seq<BrokerEndPoint> brokers = List.fromArray(brokerArray);
    Seq<TopicMetadata> topicsMetadata = List.fromArray(topicMetadataArray);
    return new TopicMetadataResponse(new kafka.api.TopicMetadataResponse(brokers, topicsMetadata, -1));
}
Example #7
Source File: KafkaStreamReader.java From arcusplatform with Apache License 2.0
private List<TopicMetadata> getMetadata(SimpleConsumer consumer) {
    TopicMetadataRequest request = new TopicMetadataRequest(ImmutableList.copyOf(config.getTopics()));
    TopicMetadataResponse response = consumer.send(request);
    return response.topicsMetadata();
}
Example #8
Source File: KafkaStreamMetadataProvider.java From incubator-pinot with Apache License 2.0
/**
 * Fetches the number of partitions for this kafka stream
 * @param timeoutMillis
 * @return
 */
@Override
public synchronized int fetchPartitionCount(long timeoutMillis) {
    int unknownTopicReplyCount = 0;
    final int MAX_UNKNOWN_TOPIC_REPLY_COUNT = 10;
    int kafkaErrorCount = 0;
    final int MAX_KAFKA_ERROR_COUNT = 10;

    final long endTime = System.currentTimeMillis() + timeoutMillis;

    while (System.currentTimeMillis() < endTime) {
        // Try to get into a state where we're connected to Kafka
        while (!_currentState.isConnectedToKafkaBroker() && System.currentTimeMillis() < endTime) {
            _currentState.process();
        }

        if (endTime <= System.currentTimeMillis() && !_currentState.isConnectedToKafkaBroker()) {
            throw new TimeoutException(
                "Failed to get the partition count for topic " + _topic + " within " + timeoutMillis + " ms");
        }

        // Send the metadata request to Kafka
        TopicMetadataResponse topicMetadataResponse = null;
        try {
            topicMetadataResponse = _simpleConsumer.send(new TopicMetadataRequest(Collections.singletonList(_topic)));
        } catch (Exception e) {
            _currentState.handleConsumerException(e);
            continue;
        }

        final TopicMetadata topicMetadata = topicMetadataResponse.topicsMetadata().get(0);
        final short errorCode = topicMetadata.errorCode();

        if (errorCode == Errors.NONE.code()) {
            return topicMetadata.partitionsMetadata().size();
        } else if (errorCode == Errors.LEADER_NOT_AVAILABLE.code()) {
            // If there is no leader, it'll take some time for a new leader to be elected, wait 100 ms before retrying
            Uninterruptibles.sleepUninterruptibly(100, TimeUnit.MILLISECONDS);
        } else if (errorCode == Errors.INVALID_TOPIC_EXCEPTION.code()) {
            throw new RuntimeException("Invalid topic name " + _topic);
        } else if (errorCode == Errors.UNKNOWN_TOPIC_OR_PARTITION.code()) {
            if (MAX_UNKNOWN_TOPIC_REPLY_COUNT < unknownTopicReplyCount) {
                throw new RuntimeException("Topic " + _topic + " does not exist");
            } else {
                // Kafka topic creation can sometimes take some time, so we'll retry after a little bit
                unknownTopicReplyCount++;
                Uninterruptibles.sleepUninterruptibly(100, TimeUnit.MILLISECONDS);
            }
        } else {
            // Retry after a short delay
            kafkaErrorCount++;
            if (MAX_KAFKA_ERROR_COUNT < kafkaErrorCount) {
                throw exceptionForKafkaErrorCode(errorCode);
            }
            Uninterruptibles.sleepUninterruptibly(100, TimeUnit.MILLISECONDS);
        }
    }

    throw new TimeoutException();
}