org.apache.kafka.clients.admin.TopicDescription Java Examples
The following examples show how to use org.apache.kafka.clients.admin.TopicDescription.
Each example is drawn from an open-source project; the source file, project, and license are noted above each snippet.
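Before the project examples, here is a minimal, self-contained sketch (not taken from any of the projects below) of the usual way a TopicDescription is obtained through the AdminClient API. The topic name and bootstrap address are placeholders.

import java.util.Collections;
import java.util.Properties;

import org.apache.kafka.clients.admin.AdminClient;
import org.apache.kafka.clients.admin.AdminClientConfig;
import org.apache.kafka.clients.admin.TopicDescription;
import org.apache.kafka.common.TopicPartitionInfo;

public class DescribeTopicSketch {
  public static void main(String[] args) throws Exception {
    Properties props = new Properties();
    // Placeholder bootstrap address; point this at a real cluster.
    props.put(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");
    try (AdminClient adminClient = AdminClient.create(props)) {
      // describeTopics() is asynchronous; get() blocks until the description arrives.
      TopicDescription description = adminClient
          .describeTopics(Collections.singleton("example-topic"))
          .values()
          .get("example-topic")
          .get();
      System.out.println("Topic " + description.name()
          + " (internal=" + description.isInternal() + ")"
          + " has " + description.partitions().size() + " partition(s)");
      for (TopicPartitionInfo partition : description.partitions()) {
        System.out.println("  partition " + partition.partition()
            + " leader=" + partition.leader().id()
            + " replicas=" + partition.replicas().size()
            + " isr=" + partition.isr().size());
      }
    }
  }
}

This sketch reads the per-topic future from values(); several of the examples below use all() instead, which returns a single future for the whole map of topic descriptions.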
Example #1
Source File: KafkaAdminClient.java From common-kafka with Apache License 2.0

/**
 * Returns the replication factor for the given topic
 *
 * @param topic
 *          a Kafka topic
 * @return the replication factor for the given topic
 *
 * @throws IllegalArgumentException
 *           if topic is null, empty or blank
 * @throws AdminOperationException
 *           if there is an issue retrieving the replication factor
 */
public int getTopicReplicationFactor(String topic) {
  if (StringUtils.isBlank(topic))
    throw new IllegalArgumentException("topic cannot be null, empty or blank");

  LOG.debug("Getting replication factor for topic [{}]", topic);

  Collection<TopicDescription> topicDescription = getTopicDescriptions(Collections.singleton(topic));
  if (topicDescription.isEmpty()) {
    throw new AdminOperationException("Unable to get description for topic: " + topic);
  }

  List<TopicPartitionInfo> topicPartitions = topicDescription.iterator().next().partitions();
  if (topicPartitions.isEmpty()) {
    throw new AdminOperationException("Unable to get partitions for topic: " + topic);
  }

  return topicPartitions.get(0).replicas().size();
}
Example #2
Source File: TopicServiceImpl.java From kafka-helmsman with MIT License

/**
 * Transform a TopicDescription instance to ConfiguredTopic instance.
 *
 * @param td an instance of TopicDescription
 * @param ktc a topic config future
 * @return an instance of ConfiguredTopic
 */
static ConfiguredTopic configuredTopic(TopicDescription td, KafkaFuture<Config> ktc) {
  int partitions = td.partitions().size();
  short replication = (short) td.partitions().iterator().next().replicas().size();
  try {
    Config tc = ktc.get();
    Map<String, String> configMap = tc
        .entries()
        .stream()
        .filter(TopicServiceImpl::isNonDefault)
        .collect(toMap(ConfigEntry::name, ConfigEntry::value));
    return new ConfiguredTopic(td.name(), partitions, replication, configMap);
  } catch (InterruptedException | ExecutionException e) {
    // TODO: FA-10109: Improve exception handling
    throw new RuntimeException(e);
  }
}
Example #3
Source File: KafkaRollerTest.java From strimzi-kafka-operator with Apache License 2.0

@Override
protected KafkaAvailability availability(Admin ac) {
  return new KafkaAvailability(null) {
    @Override
    protected Future<Set<String>> topicNames() {
      return succeededFuture(Collections.emptySet());
    }

    @Override
    protected Future<Collection<TopicDescription>> describeTopics(Set<String> names) {
      return succeededFuture(Collections.emptySet());
    }

    @Override
    Future<Boolean> canRoll(int podId) {
      return canRollFn.apply(podId);
    }
  };
}
Example #4
Source File: KafkaImpl.java From strimzi-kafka-operator with Apache License 2.0

/**
 * Completes the returned Future on the Vertx event loop
 * with the topic config obtained from the Kafka AdminClient API.
 * The Future completes with a null result if a topic with the given {@code topicName} does not exist.
 */
@Override
public Future<TopicMetadata> topicMetadata(TopicName topicName) {
  LOGGER.debug("Getting metadata for topic {}", topicName);
  ConfigResource resource = new ConfigResource(ConfigResource.Type.TOPIC, topicName.toString());
  Future<TopicDescription> topicDescriptionFuture = mapFuture(adminClient.describeTopics(
      singleton(topicName.toString())).values().get(topicName.toString()));
  Future<Config> configFuture = mapFuture(adminClient.describeConfigs(
      singleton(resource)).values().get(resource));
  return CompositeFuture.all(topicDescriptionFuture, configFuture)
      .map(compositeFuture ->
          new TopicMetadata(compositeFuture.resultAt(0), compositeFuture.resultAt(1)))
      .recover(error -> {
        if (error instanceof UnknownTopicOrPartitionException) {
          return Future.succeededFuture(null);
        } else {
          return Future.failedFuture(error);
        }
      });
}
Example #5
Source File: TestKafkaSystemAdminJava.java From samza with Apache License 2.0

@Test
public void testClearStream() {
  StreamSpec spec = new StreamSpec("testId", "testStreamClear", "testSystem", 8);

  KafkaSystemAdmin admin = systemAdmin();
  String topicName = spec.getPhysicalName();

  assertTrue("createStream should return true if the stream does not exist and then is created.",
      admin.createStream(spec));
  // validate topic exists
  assertTrue(admin.clearStream(spec));

  // validate that topic was removed
  DescribeTopicsResult dtr = admin.adminClient.describeTopics(ImmutableSet.of(topicName));
  try {
    TopicDescription td = dtr.all().get().get(topicName);
    Assert.fail("topic " + topicName + " should've been removed. td=" + td);
  } catch (Exception e) {
    if (!(e.getCause() instanceof org.apache.kafka.common.errors.UnknownTopicOrPartitionException)) {
      Assert.fail("topic " + topicName + " should've been removed. Expected UnknownTopicOrPartitionException.");
    }
  }
}
Example #6
Source File: ConsumeService.java From kafka-monitor with Apache License 2.0

@Override
public synchronized void start() {
  if (_running.compareAndSet(false, true)) {
    _consumeThread.start();
    LOG.info("{}/ConsumeService started.", _name);

    Sensor topicPartitionCount = metrics.sensor("topic-partitions");
    DescribeTopicsResult describeTopicsResult = _adminClient.describeTopics(Collections.singleton(_topic));
    Map<String, KafkaFuture<TopicDescription>> topicResultValues = describeTopicsResult.values();
    KafkaFuture<TopicDescription> topicDescriptionKafkaFuture = topicResultValues.get(_topic);
    TopicDescription topicDescription = null;
    try {
      topicDescription = topicDescriptionKafkaFuture.get();
    } catch (InterruptedException | ExecutionException e) {
      LOG.error("Exception occurred while getting the topicDescriptionKafkaFuture for topic: {}", _topic, e);
    }
    @SuppressWarnings("ConstantConditions")
    double partitionCount = topicDescription.partitions().size();
    topicPartitionCount.add(
        new MetricName("topic-partitions-count", METRIC_GROUP_NAME, "The total number of partitions for the topic.", tags),
        new Total(partitionCount));
  }
}
Example #7
Source File: AdminClientWrapper.java From hdinsight-kafka-java-get-started with MIT License

public static void describeTopics(String brokers, String topicName) throws IOException {
  // Set properties used to configure admin client
  Properties properties = getProperties(brokers);

  try (final AdminClient adminClient = KafkaAdminClient.create(properties)) {
    // Make async call to describe the topic.
    final DescribeTopicsResult describeTopicsResult = adminClient.describeTopics(Collections.singleton(topicName));

    TopicDescription description = describeTopicsResult.values().get(topicName).get();
    System.out.print(description.toString());
  } catch (Exception e) {
    System.out.print("Describe denied\n");
    System.out.print(e.getMessage());
    //throw new RuntimeException(e.getMessage(), e);
  }
}
Example #8
Source File: KafkaAdmin.java From feeyo-redisproxy with BSD 3-Clause "New" or "Revised" License

public Map<String, TopicDescription> getTopicAndDescriptions() throws Exception {
  try {
    // Query the topics
    ListTopicsOptions lto = new ListTopicsOptions();
    lto.timeoutMs(10 * 1000);
    ListTopicsResult ltr = adminClient.listTopics(lto);

    // Query the topic configuration details
    DescribeTopicsOptions dto = new DescribeTopicsOptions();
    dto.timeoutMs(15 * 1000);
    DescribeTopicsResult dtr = adminClient.describeTopics(ltr.names().get(), dto);
    return dtr.all().get();

  } catch (Exception e) {
    throw e;
  }
}
Example #9
Source File: KafkaTestUtils.java From kafka-junit with BSD 3-Clause "New" or "Revised" License

/**
 * This will consume all records from all partitions on the given topic.
 * @param <K> Type of key values.
 * @param <V> Type of message values.
 * @param topic Topic to consume from.
 * @param keyDeserializer How to deserialize the key values.
 * @param valueDeserializer How to deserialize the messages.
 * @return List of ConsumerRecords consumed.
 */
public <K, V> List<ConsumerRecord<K, V>> consumeAllRecordsFromTopic(
    final String topic,
    final Class<? extends Deserializer<K>> keyDeserializer,
    final Class<? extends Deserializer<V>> valueDeserializer
) {
  // Find all partitions on topic.
  final TopicDescription topicDescription = describeTopic(topic);
  final Collection<Integer> partitions = topicDescription
      .partitions()
      .stream()
      .map(TopicPartitionInfo::partition)
      .collect(Collectors.toList());

  // Consume messages
  return consumeAllRecordsFromTopic(topic, partitions, keyDeserializer, valueDeserializer);
}
Example #10
Source File: KafkaTopicsList.java From ksql-fork-with-deep-learning-function with Apache License 2.0

private static Map<String, TopicDescription> filterKsqlInternalTopics(
    Map<String, TopicDescription> kafkaTopicDescriptions, KsqlConfig ksqlConfig
) {
  Map<String, TopicDescription> filteredKafkaTopics = new HashMap<>();
  String serviceId = KsqlConstants.KSQL_INTERNAL_TOPIC_PREFIX
      + ksqlConfig.get(KsqlConfig.KSQL_SERVICE_ID_CONFIG).toString();
  String persistentQueryPrefix = ksqlConfig.get(
      KsqlConfig.KSQL_PERSISTENT_QUERY_NAME_PREFIX_CONFIG
  ).toString();
  String transientQueryPrefix = ksqlConfig.get(
      KsqlConfig.KSQL_TRANSIENT_QUERY_NAME_PREFIX_CONFIG
  ).toString();

  for (Map.Entry<String, TopicDescription> entry : kafkaTopicDescriptions.entrySet()) {
    if (!entry.getKey().startsWith(serviceId + persistentQueryPrefix)
        && !entry.getKey().startsWith(serviceId + transientQueryPrefix)) {
      filteredKafkaTopics.put(entry.getKey().toLowerCase(), entry.getValue());
    }
  }
  return filteredKafkaTopics;
}
Example #11
Source File: CruiseControlMetricsReporterTest.java From cruise-control with BSD 2-Clause "Simplified" License

@Test
public void testUpdatingMetricsTopicConfig() throws ExecutionException, InterruptedException {
  Properties props = new Properties();
  setSecurityConfigs(props, "admin");
  props.setProperty(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, bootstrapServers());
  AdminClient adminClient = AdminClient.create(props);
  TopicDescription topicDescription = adminClient.describeTopics(Collections.singleton(TOPIC)).values().get(TOPIC).get();
  assertEquals(1, topicDescription.partitions().size());
  // Shutdown broker
  _brokers.get(0).shutdown();
  // Change broker config
  Map<Object, Object> brokerConfig = buildBrokerConfigs().get(0);
  brokerConfig.put(CRUISE_CONTROL_METRICS_TOPIC_AUTO_CREATE_CONFIG, "true");
  brokerConfig.put(CRUISE_CONTROL_METRICS_TOPIC_NUM_PARTITIONS_CONFIG, "2");
  brokerConfig.put(CRUISE_CONTROL_METRICS_TOPIC_REPLICATION_FACTOR_CONFIG, "1");
  CCEmbeddedBroker broker = new CCEmbeddedBroker(brokerConfig);
  // Restart broker
  broker.startup();
  // Wait for broker to boot up
  Thread.sleep(5000);
  // Check whether the topic config is updated
  topicDescription = adminClient.describeTopics(Collections.singleton(TOPIC)).values().get(TOPIC).get();
  assertEquals(2, topicDescription.partitions().size());
}
Example #12
Source File: CruiseControlMetricsReporter.java From cruise-control with BSD 2-Clause "Simplified" License

protected void maybeIncreaseTopicPartitionCount() {
  String cruiseControlMetricsTopic = _metricsTopic.name();
  try {
    // Retrieve topic partition count to check and update.
    TopicDescription topicDescription =
        _adminClient.describeTopics(Collections.singletonList(cruiseControlMetricsTopic)).values()
            .get(cruiseControlMetricsTopic).get(CLIENT_REQUEST_TIMEOUT_MS, TimeUnit.MILLISECONDS);
    if (topicDescription.partitions().size() < _metricsTopic.numPartitions()) {
      _adminClient.createPartitions(Collections.singletonMap(cruiseControlMetricsTopic,
          NewPartitions.increaseTo(_metricsTopic.numPartitions())));
    }
  } catch (InterruptedException | ExecutionException | TimeoutException e) {
    LOG.warn("Partition count increase to {} for topic {} failed{}.", _metricsTopic.numPartitions(),
        cruiseControlMetricsTopic,
        (e.getCause() instanceof ReassignmentInProgressException) ? " due to ongoing reassignment" : "", e);
  }
}
Example #13
Source File: KafkaTopicClientImplIntegrationTest.java From ksql-fork-with-deep-learning-function with Apache License 2.0

@Test
public void shouldCreateTopicWithConfig() {
  // Given:
  final String topicName = UUID.randomUUID().toString();
  final Map<String, String> config = ImmutableMap.of(
      TopicConfig.COMPRESSION_TYPE_CONFIG, "snappy");

  // When:
  client.createTopic(topicName, 2, (short) 1, config);

  // Then:
  assertThatEventually(() -> topicExists(topicName), is(true));
  final TopicDescription topicDescription = getTopicDescription(topicName);
  assertThat(topicDescription.partitions(), hasSize(2));
  assertThat(topicDescription.partitions().get(0).replicas(), hasSize(1));
  final Map<String, String> configs = client.getTopicConfig(topicName);
  assertThat(configs.get(TopicConfig.COMPRESSION_TYPE_CONFIG), is("snappy"));
}
Example #14
Source File: Utils.java From strimzi-kafka-operator with Apache License 2.0

public static TopicMetadata getTopicMetadata(Topic kubeTopic) {
  List<Node> nodes = new ArrayList<>();
  for (int nodeId = 0; nodeId < kubeTopic.getNumReplicas(); nodeId++) {
    nodes.add(new Node(nodeId, "localhost", 9092 + nodeId));
  }
  List<TopicPartitionInfo> partitions = new ArrayList<>();
  for (int partitionId = 0; partitionId < kubeTopic.getNumPartitions(); partitionId++) {
    partitions.add(new TopicPartitionInfo(partitionId, nodes.get(0), nodes, nodes));
  }
  List<ConfigEntry> configs = new ArrayList<>();
  for (Map.Entry<String, String> entry : kubeTopic.getConfig().entrySet()) {
    configs.add(new ConfigEntry(entry.getKey(), entry.getValue()));
  }

  return new TopicMetadata(new TopicDescription(kubeTopic.getTopicName().toString(), false, partitions),
      new Config(configs));
}
Example #15
Source File: SamplingUtilsTest.java From cruise-control with BSD 2-Clause "Simplified" License

@Test
public void testMaybeIncreasePartitionCount() throws InterruptedException, ExecutionException, TimeoutException {
  AdminClient adminClient = EasyMock.createMock(AdminClient.class);
  NewTopic topicToAddPartitions = SamplingUtils.wrapTopic(MOCK_TOPIC, MOCK_DESIRED_PARTITION_COUNT,
      MOCK_REPLICATION_FACTOR, MOCK_DESIRED_RETENTION_MS);

  DescribeTopicsResult describeTopicsResult = EasyMock.createMock(DescribeTopicsResult.class);
  KafkaFuture<TopicDescription> topicDescriptionFuture = EasyMock.createMock(KafkaFuture.class);
  TopicDescription topicDescription = EasyMock.createMock(TopicDescription.class);
  Map<String, KafkaFuture<TopicDescription>> describeTopicsValues = Collections.singletonMap(MOCK_TOPIC, topicDescriptionFuture);

  Map<String, KafkaFuture<Void>> createPartitionsValues = Collections.singletonMap(MOCK_TOPIC, EasyMock.createMock(KafkaFuture.class));
  CreatePartitionsResult createPartitionsResult = EasyMock.createMock(CreatePartitionsResult.class);

  EasyMock.expect(adminClient.describeTopics(Collections.singletonList(MOCK_TOPIC))).andReturn(describeTopicsResult);
  EasyMock.expect(describeTopicsResult.values()).andReturn(describeTopicsValues);
  EasyMock.expect(topicDescriptionFuture.get(CLIENT_REQUEST_TIMEOUT_MS, TimeUnit.MILLISECONDS)).andReturn(topicDescription);
  EasyMock.expect(topicDescription.partitions()).andReturn(MOCK_PARTITIONS);
  EasyMock.expect(adminClient.createPartitions(Collections.singletonMap(MOCK_TOPIC, EasyMock.anyObject())))
          .andReturn(createPartitionsResult);
  EasyMock.expect(createPartitionsResult.values()).andReturn(createPartitionsValues);
  EasyMock.replay(adminClient, describeTopicsResult, topicDescriptionFuture, topicDescription, createPartitionsResult);

  boolean increasePartitionCount = SamplingUtils.maybeIncreasePartitionCount(adminClient, topicToAddPartitions);

  EasyMock.verify(adminClient, describeTopicsResult, topicDescriptionFuture, topicDescription, createPartitionsResult);
  assertTrue(increasePartitionCount);
}
Example #16
Source File: KafkaBinderTests.java From spring-cloud-stream-binder-kafka with Apache License 2.0

private int invokePartitionSize(String topic) throws Throwable {
  DescribeTopicsResult describeTopicsResult = adminClient
      .describeTopics(Collections.singletonList(topic));
  KafkaFuture<Map<String, TopicDescription>> all = describeTopicsResult.all();
  Map<String, TopicDescription> stringTopicDescriptionMap = all
      .get(DEFAULT_OPERATION_TIMEOUT, TimeUnit.SECONDS);
  TopicDescription topicDescription = stringTopicDescriptionMap.get(topic);
  return topicDescription.partitions().size();
}
Example #17
Source File: KafkaAdmin.java From feeyo-redisproxy with BSD 3-Clause "New" or "Revised" License

/**
 * Get the configuration information for the specified topic.
 */
public TopicDescription getDescriptionByTopicName(String topic) throws Exception {
  List<String> topics = new ArrayList<String>();
  topics.add(topic);

  DescribeTopicsOptions dto = new DescribeTopicsOptions();
  dto.timeoutMs(5 * 1000);
  DescribeTopicsResult dtr = adminClient.describeTopics(topics, dto);
  return dtr.all().get().get(topic);
}
Example #18
Source File: ProduceService.java From kafka-monitor with Apache License 2.0

@Override
public synchronized void start() {
  if (_running.compareAndSet(false, true)) {
    try {
      KafkaFuture<Map<String, TopicDescription>> topicDescriptionsFuture =
          _adminClient.describeTopics(Collections.singleton(_topic)).all();
      Map<String, TopicDescription> topicDescriptions = topicDescriptionsFuture.get();
      int partitionNum = topicDescriptions.get(_topic).partitions().size();
      initializeStateForPartitions(partitionNum);
      _handleNewPartitionsExecutor.scheduleWithFixedDelay(new NewPartitionHandler(), 1, 30, TimeUnit.SECONDS);
      LOG.info("{}/ProduceService started", _name);
    } catch (InterruptedException | UnknownTopicOrPartitionException | ExecutionException e) {
      LOG.error("Exception occurred while starting produce service for topic: {}", _topic, e);
    }
  }
}
Example #19
Source File: ExecutorTest.java From cruise-control with BSD 2-Clause "Simplified" License

@Test
public void testBrokerDiesBeforeMovingPartition() throws Exception {
  KafkaZkClient kafkaZkClient = KafkaCruiseControlUtils.createKafkaZkClient(zookeeper().connectionString(),
      "ExecutorTestMetricGroup",
      "BrokerDiesBeforeMovingPartition",
      false);
  try {
    Map<String, TopicDescription> topicDescriptions = createTopics((int) PRODUCE_SIZE_IN_BYTES);
    // initialLeader0 will be alive after killing a broker in cluster.
    int initialLeader0 = topicDescriptions.get(TOPIC0).partitions().get(0).leader().id();
    int initialLeader1 = topicDescriptions.get(TOPIC1).partitions().get(0).leader().id();
    // Kill broker before starting the reassignment.
    _brokers.get(initialLeader0 == 0 ? 1 : 0).shutdown();

    ExecutionProposal proposal0 =
        new ExecutionProposal(TP0, PRODUCE_SIZE_IN_BYTES, new ReplicaPlacementInfo(initialLeader0),
                              Collections.singletonList(new ReplicaPlacementInfo(initialLeader0)),
                              Collections.singletonList(new ReplicaPlacementInfo(initialLeader0 == 0 ? 1 : 0)));
    ExecutionProposal proposal1 =
        new ExecutionProposal(TP1, 0, new ReplicaPlacementInfo(initialLeader1),
                              Arrays.asList(new ReplicaPlacementInfo(initialLeader1),
                                            new ReplicaPlacementInfo(initialLeader1 == 0 ? 1 : 0)),
                              Arrays.asList(new ReplicaPlacementInfo(initialLeader1 == 0 ? 1 : 0),
                                            new ReplicaPlacementInfo(initialLeader1)));

    Collection<ExecutionProposal> proposalsToExecute = Arrays.asList(proposal0, proposal1);
    executeAndVerifyProposals(kafkaZkClient, proposalsToExecute, Collections.emptyList(), true, null, false);

    // We are not doing the rollback.
    assertEquals(Collections.singletonList(initialLeader0 == 0 ? 1 : 0),
                 ExecutorUtils.newAssignmentForPartition(kafkaZkClient, TP0));
    // The leadership should be on the alive broker.
    assertEquals(initialLeader0, kafkaZkClient.getLeaderForPartition(TP0).get());
    assertEquals(initialLeader0, kafkaZkClient.getLeaderForPartition(TP1).get());
  } finally {
    KafkaCruiseControlUtils.closeKafkaZkClientWithTimeout(kafkaZkClient);
  }
}
Example #20
Source File: KafkaTopicProvisioner.java From spring-cloud-stream-binder-kafka with Apache License 2.0

@Override
public ProducerDestination provisionProducerDestination(final String name,
    ExtendedProducerProperties<KafkaProducerProperties> properties) {
  if (logger.isInfoEnabled()) {
    logger.info("Using kafka topic for outbound: " + name);
  }
  KafkaTopicUtils.validateTopicName(name);
  try (AdminClient adminClient = AdminClient.create(this.adminClientProperties)) {
    createTopic(adminClient, name, properties.getPartitionCount(), false,
        properties.getExtension().getTopic());
    int partitions = 0;
    Map<String, TopicDescription> topicDescriptions = new HashMap<>();
    if (this.configurationProperties.isAutoCreateTopics()) {
      this.metadataRetryOperations.execute(context -> {
        try {
          if (logger.isDebugEnabled()) {
            logger.debug("Attempting to retrieve the description for the topic: " + name);
          }
          DescribeTopicsResult describeTopicsResult = adminClient
              .describeTopics(Collections.singletonList(name));
          KafkaFuture<Map<String, TopicDescription>> all = describeTopicsResult.all();
          topicDescriptions.putAll(all.get(this.operationTimeout, TimeUnit.SECONDS));
        } catch (Exception ex) {
          throw new ProvisioningException("Problems encountered with partitions finding", ex);
        }
        return null;
      });
    }
    TopicDescription topicDescription = topicDescriptions.get(name);
    if (topicDescription != null) {
      partitions = topicDescription.partitions().size();
    }
    return new KafkaProducerDestination(name, partitions);
  }
}
Example #21
Source File: KafkaAvailability.java From strimzi-kafka-operator with Apache License 2.0

private Set<TopicDescription> groupTopicsByBroker(Collection<TopicDescription> tds, int podId) {
  Set<TopicDescription> topicPartitionInfos = new HashSet<>();
  for (TopicDescription td : tds) {
    log.trace("{}", td);
    for (TopicPartitionInfo pd : td.partitions()) {
      for (Node broker : pd.replicas()) {
        if (podId == broker.id()) {
          topicPartitionInfos.add(td);
        }
      }
    }
  }
  return topicPartitionInfos;
}
Example #22
Source File: KafkaTestUtilsTest.java From kafka-junit with BSD 3-Clause "New" or "Revised" License

/**
 * Example showing how to get information about a topic in your Kafka cluster.
 */
@Test
void testDescribeTopic() {
  final TopicDescription topicDescription = getKafkaTestUtils().describeTopic(topicName);
  Assertions.assertNotNull(topicDescription, "Should return a result");

  // Debug logging.
  logger.info("Found topic with name {} and {} partitions.", topicDescription.name(), topicDescription.partitions().size());

  Assertions.assertEquals(3, topicDescription.partitions().size(), "Topic should have 3 partitions");
  Assertions.assertFalse(topicDescription.isInternal(), "Our topic is not an internal topic");
  Assertions.assertEquals(topicName, topicDescription.name(), "Has the correct name");
}
Example #23
Source File: CruiseControlMetricsReporterAutoCreateTopicTest.java From cruise-control with BSD 2-Clause "Simplified" License

@Test
public void testAutoCreateMetricsTopic() throws ExecutionException, InterruptedException {
  Properties props = new Properties();
  props.setProperty(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, bootstrapServers());
  AdminClient adminClient = AdminClient.create(props);

  TopicDescription topicDescription = adminClient.describeTopics(Collections.singleton(TOPIC)).values().get(TOPIC).get();
  // assert that the metrics topic was created with partitions and replicas as configured for the metrics report auto-creation
  assertEquals(1, topicDescription.partitions().size());
  assertEquals(1, topicDescription.partitions().get(0).replicas().size());
}
Example #24
Source File: KafkaAvailability.java From strimzi-kafka-operator with Apache License 2.0

protected Future<Collection<TopicDescription>> describeTopics(Set<String> names) {
  Promise<Collection<TopicDescription>> descPromise = Promise.promise();
  ac.describeTopics(names).all()
      .whenComplete((tds, error) -> {
        if (error != null) {
          descPromise.fail(error);
        } else {
          log.debug("Got topic descriptions for {} topics", tds.size());
          descPromise.complete(tds.values());
        }
      });
  return descPromise.future();
}
Example #25
Source File: KafkaAvailabilityTest.java From strimzi-kafka-operator with Apache License 2.0

void mockDescribeTopics(Admin mockAc) {
  when(mockAc.describeTopics(any())).thenAnswer(invocation -> {
    DescribeTopicsResult dtr = mock(DescribeTopicsResult.class);
    Collection<String> topicNames = invocation.getArgument(0);
    Throwable throwable = null;
    for (String topicName : topicNames) {
      throwable = describeTopicsResult.get(topicName);
      if (throwable != null) {
        break;
      }
    }
    if (throwable != null) {
      when(dtr.all()).thenReturn(failedFuture(throwable));
    } else {
      Map<String, TopicDescription> tds = topics.entrySet().stream().collect(Collectors.toMap(
          e -> e.getKey(),
          e -> {
            TSB tsb = e.getValue();
            return new TopicDescription(tsb.name, tsb.internal,
                tsb.partitions.entrySet().stream().map(e1 -> {
                  TSB.PSB psb = e1.getValue();
                  return new TopicPartitionInfo(psb.id,
                      psb.leader != null ? node(psb.leader) : Node.noNode(),
                      Arrays.stream(psb.replicaOn).boxed().map(broker -> node(broker)).collect(Collectors.toList()),
                      Arrays.stream(psb.isr).boxed().map(broker -> node(broker)).collect(Collectors.toList()));
                }).collect(Collectors.toList()));
          }
      ));
      when(dtr.all()).thenReturn(KafkaFuture.completedFuture(tds));
      when(dtr.values()).thenThrow(notImplemented());
    }
    return dtr;
  });
}
Example #26
Source File: ExecutorTest.java From cruise-control with BSD 2-Clause "Simplified" License

/**
 * Proposal#1: [TP0] move from original broker to the other one -- e.g. 0 -> 1
 * Proposal#2: [TP1] change order and leader -- e.g. [0, 1] -> [1, 0]
 */
private void populateProposals(List<ExecutionProposal> proposalToExecute,
                               List<ExecutionProposal> proposalToVerify,
                               long topicSize) throws InterruptedException {
  Map<String, TopicDescription> topicDescriptions = createTopics((int) topicSize);
  int initialLeader0 = topicDescriptions.get(TOPIC0).partitions().get(0).leader().id();
  int initialLeader1 = topicDescriptions.get(TOPIC1).partitions().get(0).leader().id();
  // Valid proposals
  ExecutionProposal proposal0 =
      new ExecutionProposal(TP0, topicSize, new ReplicaPlacementInfo(initialLeader0),
                            Collections.singletonList(new ReplicaPlacementInfo(initialLeader0)),
                            Collections.singletonList(new ReplicaPlacementInfo(initialLeader0 == 0 ? 1 : 0)));
  ExecutionProposal proposal1 =
      new ExecutionProposal(TP1, 0, new ReplicaPlacementInfo(initialLeader1),
                            Arrays.asList(new ReplicaPlacementInfo(initialLeader1),
                                          new ReplicaPlacementInfo(initialLeader1 == 0 ? 1 : 0)),
                            Arrays.asList(new ReplicaPlacementInfo(initialLeader1 == 0 ? 1 : 0),
                                          new ReplicaPlacementInfo(initialLeader1)));
  // Invalid proposals; the topics targeted by these proposals do not exist.
  ExecutionProposal proposal2 =
      new ExecutionProposal(TP2, 0, new ReplicaPlacementInfo(initialLeader0),
                            Collections.singletonList(new ReplicaPlacementInfo(initialLeader0)),
                            Collections.singletonList(new ReplicaPlacementInfo(initialLeader0 == 0 ? 1 : 0)));
  ExecutionProposal proposal3 =
      new ExecutionProposal(TP3, 0, new ReplicaPlacementInfo(initialLeader1),
                            Arrays.asList(new ReplicaPlacementInfo(initialLeader1),
                                          new ReplicaPlacementInfo(initialLeader1 == 0 ? 1 : 0)),
                            Arrays.asList(new ReplicaPlacementInfo(initialLeader1 == 0 ? 1 : 0),
                                          new ReplicaPlacementInfo(initialLeader1)));

  proposalToExecute.addAll(Arrays.asList(proposal0, proposal1, proposal2, proposal3));
  proposalToVerify.addAll(Arrays.asList(proposal0, proposal1));
}
Example #27
Source File: LeaderInSyncByPartitionFunctionTest.java From data-highway with Apache License 2.0

@Test
public void typical() throws Exception {
  TopicPartitionInfo tpi = new TopicPartitionInfo(0, node0, singletonList(node0), singletonList(node0));
  KafkaFuture<Map<String, TopicDescription>> kafkaFuture = topicDescriptionFuture(tpi);
  doReturn(describeTopicsResult).when(adminClient).describeTopics(topics);
  doReturn(kafkaFuture).when(describeTopicsResult).all();

  Map<TopicPartition, LeaderInSync> result = underTest.apply(0, topics);

  assertThat(result.size(), is(1));
  LeaderInSync leaderInSync = result.get(new TopicPartition(topic, 0));
  assertThat(leaderInSync.isLeader(), is(true));
  assertThat(leaderInSync.isInSync(), is(true));
}
Example #28
Source File: KafkaImplTest.java From strimzi-kafka-operator with Apache License 2.0

private void mockDescribeTopics(Admin admin, Map<String, Either<TopicDescription, Exception>> result) {
  DescribeTopicsResult describeTopicsResult = mock(DescribeTopicsResult.class);
  when(describeTopicsResult.values()).thenReturn(result.entrySet().stream().collect(toMap(
      entry1 -> entry1.getKey(),
      entry1 -> {
        KafkaFutureImpl<TopicDescription> kafkaFuture1 = new KafkaFutureImpl<>();
        if (entry1.getValue().isLeft()) {
          kafkaFuture1.complete(entry1.getValue().left());
        } else {
          kafkaFuture1.completeExceptionally(entry1.getValue().right());
        }
        return kafkaFuture1;
      })));
  Optional<Either<TopicDescription, Exception>> first =
      result.values().stream().filter(either -> !either.isLeft()).findFirst();
  if (first.isPresent()) {
    KafkaFutureImpl<Map<String, TopicDescription>> kafkaFuture = new KafkaFutureImpl<>();
    kafkaFuture.completeExceptionally(first.get().right());
    when(describeTopicsResult.all()).thenReturn(kafkaFuture);
  } else {
    when(describeTopicsResult.all()).thenReturn(KafkaFuture.completedFuture(
        result.entrySet().stream().collect(toMap(
            entry -> entry.getKey(),
            entry -> entry.getValue().left()))
    ));
  }
  when(admin.describeTopics(result.keySet())).thenReturn(describeTopicsResult);
}
Example #29
Source File: Utils.java From strimzi-kafka-operator with Apache License 2.0

public static TopicMetadata getTopicMetadata(String topicName, Config config) {
  Node node0 = new Node(0, "host0", 1234);
  Node node1 = new Node(1, "host1", 1234);
  Node node2 = new Node(2, "host2", 1234);
  List<Node> nodes02 = asList(node0, node1, node2);
  TopicDescription desc = new TopicDescription(topicName, false, asList(
      new TopicPartitionInfo(0, node0, nodes02, nodes02),
      new TopicPartitionInfo(1, node0, nodes02, nodes02)
  ));
  //org.apache.kafka.clients.admin.Config config = new Config(configs);
  return new TopicMetadata(desc, config);
}