org.apache.kafka.clients.admin.DescribeLogDirsResult Java Examples

The following examples show how to use org.apache.kafka.clients.admin.DescribeLogDirsResult. You can vote up the examples you find useful or vote down those you don't, and you can reach the original project or source file via the link above each example. Related API usage is listed in the sidebar.
Example #1
Source File: KafkaCruiseControlUtils.java    From cruise-control with BSD 2-Clause "Simplified" License 5 votes vote down vote up
/**
 * Queries the log directories of the supplied brokers using a short-lived AdminClient
 * built from the given configurations.
 *
 * @param brokers Brokers whose log directories should be described.
 * @param adminClientConfigs Configuration properties for the AdminClient performing the query.
 * @return The {@link DescribeLogDirsResult} for the requested brokers.
 */
public static DescribeLogDirsResult describeLogDirs(Collection<Integer> brokers, Map<String, Object> adminClientConfigs) {
  AdminClient client = KafkaCruiseControlUtils.createAdminClient(adminClientConfigs);
  try {
    return client.describeLogDirs(brokers);
  } finally {
    // Always release the client; the helper bounds the close with a timeout.
    KafkaCruiseControlUtils.closeAdminClientWithTimeout(client);
  }
}
 
Example #2
Source File: LoadMonitorTest.java    From cruise-control with BSD 2-Clause "Simplified" License 5 votes vote down vote up
/**
 * Fabricates a {@link DescribeLogDirsResult} for two brokers without a live cluster,
 * by reflectively invoking the class's package-private constructor.
 *
 * @return A result mapping broker 0 to one log dir holding T0P0/T0P1/T1P0/T1P1, and
 *         broker 1 to two log dirs holding T0P0/T0P1/T1P0 and T1P1 respectively.
 * @throws IllegalStateException if reflective construction fails.
 */
private DescribeLogDirsResult getDescribeLogDirsResult() {
  try {
    // Reflectively set DescribeLogDirsResult's constructor from package private to public.
    Constructor<DescribeLogDirsResult> constructor = DescribeLogDirsResult.class.getDeclaredConstructor(Map.class);
    constructor.setAccessible(true);

    Map<Integer, KafkaFuture<Map<String, DescribeLogDirsResponse.LogDirInfo>>> futureByBroker = new HashMap<>();

    // Broker 0: a single log dir hosting all four partitions.
    Map<String, DescribeLogDirsResponse.LogDirInfo> logdirInfoBylogdir = new HashMap<>();
    Map<TopicPartition, DescribeLogDirsResponse.ReplicaInfo> replicaInfoByPartition = new HashMap<>();
    replicaInfoByPartition.put(T0P0, new DescribeLogDirsResponse.ReplicaInfo(0, 0, false));
    replicaInfoByPartition.put(T0P1, new DescribeLogDirsResponse.ReplicaInfo(0, 0, false));
    replicaInfoByPartition.put(T1P0, new DescribeLogDirsResponse.ReplicaInfo(0, 0, false));
    replicaInfoByPartition.put(T1P1, new DescribeLogDirsResponse.ReplicaInfo(0, 0, false));
    logdirInfoBylogdir.put("/tmp/kafka-logs", new DescribeLogDirsResponse.LogDirInfo(Errors.NONE, replicaInfoByPartition));
    futureByBroker.put(0, completedFuture(logdirInfoBylogdir));

    // Broker 1: partitions split across two log dirs.
    logdirInfoBylogdir = new HashMap<>();
    replicaInfoByPartition = new HashMap<>();
    replicaInfoByPartition.put(T0P0, new DescribeLogDirsResponse.ReplicaInfo(0, 0, false));
    replicaInfoByPartition.put(T0P1, new DescribeLogDirsResponse.ReplicaInfo(0, 0, false));
    replicaInfoByPartition.put(T1P0, new DescribeLogDirsResponse.ReplicaInfo(0, 0, false));
    logdirInfoBylogdir.put("/tmp/kafka-logs-1", new DescribeLogDirsResponse.LogDirInfo(Errors.NONE, replicaInfoByPartition));
    logdirInfoBylogdir.put("/tmp/kafka-logs-2",
                           new DescribeLogDirsResponse.LogDirInfo(Errors.NONE,
                                                                  Collections.singletonMap(T1P1,
                                                                                           new DescribeLogDirsResponse.ReplicaInfo(0, 0, false))));
    futureByBroker.put(1, completedFuture(logdirInfoBylogdir));
    return constructor.newInstance(futureByBroker);
  } catch (InstantiationException | IllegalAccessException | InvocationTargetException | NoSuchMethodException e) {
    // Fail loudly instead of returning null: a swallowed reflection failure would
    // otherwise resurface later as an opaque NPE far from its cause.
    throw new IllegalStateException("Failed to reflectively construct DescribeLogDirsResult.", e);
  }
}
 
Example #3
Source File: KafkaMetricsServiceImpl.java    From kafka-eagle with Apache License 2.0 4 votes vote down vote up
/**
 * Sums the on-disk size (bytes) of every replica of the given topic across the log
 * directories of its partition leader brokers.
 *
 * @param clusterAlias Alias identifying which configured Kafka cluster to query.
 * @param topic Topic whose total capacity should be computed.
 * @return The summed size rendered via {@code StrUtils.stringifyByObject}; an empty
 *         JSONObject for the internal consumer-offsets topic or when no log-dir data
 *         is returned.
 */
public JSONObject topicKafkaCapacity(String clusterAlias, String topic) {
	if (Kafka.CONSUMER_OFFSET_TOPIC.equals(topic)) {
		return new JSONObject();
	}
	Properties prop = new Properties();
	prop.put(CommonClientConfigs.BOOTSTRAP_SERVERS_CONFIG, parseBrokerServer(clusterAlias));

	// Apply per-cluster SASL/SSL settings when enabled in the system configuration.
	if (SystemConfigUtils.getBooleanProperty(clusterAlias + ".kafka.eagle.sasl.enable")) {
		kafkaService.sasl(prop, clusterAlias);
	}
	if (SystemConfigUtils.getBooleanProperty(clusterAlias + ".kafka.eagle.ssl.enable")) {
		kafkaService.ssl(prop, clusterAlias);
	}
	long sum = 0L;
	AdminClient adminClient = null;
	try {
		adminClient = AdminClient.create(prop);
		// Query only the leader brokers of this topic's partitions.
		List<MetadataInfo> leaders = kafkaService.findKafkaLeader(clusterAlias, topic);
		Set<Integer> ids = new HashSet<>();
		for (MetadataInfo metadata : leaders) {
			ids.add(metadata.getLeader());
		}
		DescribeLogDirsResult logSizeBytes = adminClient.describeLogDirs(ids);
		Map<Integer, Map<String, DescribeLogDirsResponse.LogDirInfo>> logDirsByBroker = logSizeBytes.all().get();
		if (logDirsByBroker == null) {
			return new JSONObject();
		}

		// Accumulate the size of every replica belonging to this topic, over all
		// brokers and all of their log directories.
		for (Map<String, DescribeLogDirsResponse.LogDirInfo> logDirInfos : logDirsByBroker.values()) {
			for (DescribeLogDirsResponse.LogDirInfo info : logDirInfos.values()) {
				for (Map.Entry<TopicPartition, DescribeLogDirsResponse.ReplicaInfo> replicas : info.replicaInfos.entrySet()) {
					if (topic.equals(replicas.getKey().topic())) {
						sum += replicas.getValue().size;
					}
				}
			}
		}

	} catch (Exception e) {
		// Log the throwable itself so the stack trace goes to the logger, and use
		// e.getMessage() directly: e.getCause() may be null and would NPE here.
		LOG.error("Get topic capacity has error, msg is {}", e.getMessage(), e);
	} finally {
		// AdminClient.create(prop) may have thrown before assignment; guard the close.
		if (adminClient != null) {
			adminClient.close();
		}
	}
	return StrUtils.stringifyByObject(sum);
}