kafka.metrics.KafkaMetricsConfig Java Examples
The following examples show how to use kafka.metrics.KafkaMetricsConfig. Each example is taken from an open-source project; the source file and its license are noted above the code.
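All of the reporters below follow the same pattern: the broker passes its configuration to the reporter's init(VerifiableProperties) method, the reporter wraps it in a KafkaMetricsConfig, and it uses pollingIntervalSecs() (backed by the broker property kafka.metrics.polling.interval.secs) to schedule polling. The sketch below illustrates that pattern in isolation; the class name LoggingMetricsReporter is invented for illustration, and it assumes the Kafka core jar is on the classpath.

import kafka.metrics.KafkaMetricsConfig;
import kafka.metrics.KafkaMetricsReporter;
import kafka.utils.VerifiableProperties;

// Hypothetical reporter, shown only to illustrate the common init() pattern.
public class LoggingMetricsReporter implements KafkaMetricsReporter {

    private boolean initialized = false;

    @Override
    public void init(VerifiableProperties props) {
        if (!initialized) {
            // KafkaMetricsConfig parses the standard broker metrics settings,
            // most importantly kafka.metrics.polling.interval.secs.
            KafkaMetricsConfig metricsConfig = new KafkaMetricsConfig(props);
            initialized = true;
            System.out.println("Would poll metrics every "
                    + metricsConfig.pollingIntervalSecs() + "s");
        }
    }
}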
Example #1
Source File: KafkaHttpMetricsReporter.java from kafka-http-metrics-reporter with Apache License 2.0
@Override
public void init(VerifiableProperties verifiableProperties) {
    if (!initialized) {
        // get configured metrics from kafka
        KafkaMetricsConfig metricsConfig = new KafkaMetricsConfig(verifiableProperties);

        // get the configured properties from kafka to set the bindAddress and port.
        bindAddress = verifiableProperties.getProperty("kafka.http.metrics.host");
        port = Integer.parseInt(verifiableProperties.getProperty("kafka.http.metrics.port"));
        enabled = Boolean.parseBoolean(verifiableProperties.getProperty("kafka.http.metrics.reporter.enabled"));

        // construct the Metrics Server
        metricsServer = new KafkaHttpMetricsServer(bindAddress, port);
        initialized = true;

        // call the method startReporter
        startReporter(metricsConfig.pollingIntervalSecs());
    } else {
        LOG.error("Kafka Http Metrics Reporter already initialized");
    }
}
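On a broker this reporter is registered by listing its class in the kafka.metrics.reporters property of server.properties, with the kafka.http.metrics.* settings alongside it. It can also be exercised directly, for example from a test; the sketch below does that with arbitrary example values, and the demo class name is hypothetical (the import for KafkaHttpMetricsReporter depends on the project's package).

import java.util.Properties;
import kafka.utils.VerifiableProperties;

public class HttpReporterDemo {
    public static void main(String[] args) {
        Properties props = new Properties();
        props.put("kafka.http.metrics.host", "0.0.0.0");        // bind address (example value)
        props.put("kafka.http.metrics.port", "8080");           // HTTP port (example value)
        props.put("kafka.http.metrics.reporter.enabled", "true");
        props.put("kafka.metrics.polling.interval.secs", "10"); // read by KafkaMetricsConfig

        // drive the reporter exactly as the broker would
        KafkaHttpMetricsReporter reporter = new KafkaHttpMetricsReporter();
        reporter.init(new VerifiableProperties(props));
    }
}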
Example #2
Source File: KafkaGraphiteMetricsReporter.java from kafka-graphite with Apache License 2.0
@Override
public synchronized void init(VerifiableProperties props) {
    if (!initialized) {
        KafkaMetricsConfig metricsConfig = new KafkaMetricsConfig(props);

        // Graphite connection settings and metric naming
        graphiteHost = props.getString("kafka.graphite.metrics.host", GRAPHITE_DEFAULT_HOST);
        graphitePort = props.getInt("kafka.graphite.metrics.port", GRAPHITE_DEFAULT_PORT);
        metricPrefix = props.getString("kafka.graphite.metrics.group", GRAPHITE_DEFAULT_PREFIX);
        String excludeRegex = props.getString("kafka.graphite.metrics.exclude.regex", null);
        metricDimensions = Dimension.fromProperties(props.props(), "kafka.graphite.dimension.enabled.");

        LOG.debug("Initialize GraphiteReporter [{},{},{}]", graphiteHost, graphitePort, metricPrefix);

        // optionally filter out metrics matching the exclude regex
        if (excludeRegex != null) {
            LOG.debug("Using regex [{}] for GraphiteReporter", excludeRegex);
            metricPredicate = new FilterMetricPredicate(excludeRegex);
        }

        reporter = buildGraphiteReporter();

        if (props.getBoolean("kafka.graphite.metrics.reporter.enabled", false)) {
            initialized = true;
            startReporter(metricsConfig.pollingIntervalSecs());
            LOG.debug("GraphiteReporter started.");
        }
    }
}
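Unlike Example #1, this reporter stays disabled unless kafka.graphite.metrics.reporter.enabled is explicitly set to true, and it can drop metrics by name: the kafka.graphite.metrics.exclude.regex value is wrapped in a FilterMetricPredicate before the Graphite reporter is built.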
Example #3
Source File: KafkaStatsdMetricsReporter.java from kafka-statsd-reporter with MIT License
@Override
public synchronized void init(VerifiableProperties props) {
    if (!initialized) {
        KafkaMetricsConfig metricsConfig = new KafkaMetricsConfig(props);

        // StatsD connection settings and metric naming
        statsdHost = props.getString("kafka.statsd.metrics.host", STATSD_DEFAULT_HOST).trim();
        statsdPort = props.getInt("kafka.statsd.metrics.port", STATSD_DEFAULT_PORT);
        statsdGroupPrefix = props.getString("kafka.statsd.metrics.group", STATSD_DEFAULT_PREFIX).trim();
        String regex = props.getString("kafka.statsd.metrics.exclude.regex", null);

        LOG.debug("Initialize StatsdReporter [" + statsdHost + "," + statsdPort + "," + statsdGroupPrefix + "]");

        // optionally filter out metrics matching the exclude regex
        if (regex != null) {
            predicate = new RegexMetricPredicate(regex);
        }

        try {
            reporter = new StatsdReporter(
                    Metrics.defaultRegistry(),
                    statsdGroupPrefix,
                    predicate,
                    statsdHost,
                    statsdPort,
                    Clock.defaultClock());
        } catch (IOException e) {
            LOG.error("Unable to initialize StatsdReporter", e);
        }

        if (props.getBoolean("kafka.statsd.metrics.reporter.enabled", false)) {
            initialized = true;
            startReporter(metricsConfig.pollingIntervalSecs());
            LOG.debug("StatsdReporter started.");
        }
    }
}
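Note that the StatsdReporter is constructed inside a try/catch: if construction fails with an IOException the error is only logged, so when kafka.statsd.metrics.reporter.enabled is true the enabled branch still runs with reporter left null.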
Example #4
Source File: KafkaBrokerReporter.java from metrics-kafka with Apache License 2.0
synchronized public void init(VerifiableProperties props) {
    if (!initialized) {
        this.props = props;

        // point the embedded producer at the local broker
        props.props().put("metadata.broker.list",
                String.format("%s:%d", "localhost", props.getInt("port")));

        final KafkaMetricsConfig metricsConfig = new KafkaMetricsConfig(props);

        // use the static String.format here: calling "broker%s".format(...) on an
        // instance would treat the broker id itself as the format string and
        // silently drop the "broker" prefix
        this.underlying = new TopicReporter(
                Metrics.defaultRegistry(),
                new ProducerConfig(props.props()),
                String.format("broker%s", props.getString("broker.id")));

        initialized = true;
        startReporter(metricsConfig.pollingIntervalSecs());
    }
}
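This reporter publishes the broker's own metrics back into a Kafka topic: it sets metadata.broker.list to the local broker (localhost plus the broker's configured port) and names the metric stream with a broker-specific prefix derived from broker.id.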
Example #5
Source File: KafkaTimelineMetricsReporter.java from ambari-metrics with Apache License 2.0
@Override
public void init(VerifiableProperties props) {
    synchronized (lock) {
        if (!initialized) {
            LOG.info("Initializing Kafka Timeline Metrics Sink");
            try {
                hostname = InetAddress.getLocalHost().getHostName();
                // If not FQDN, call DNS
                if ((hostname == null) || (!hostname.contains("."))) {
                    hostname = InetAddress.getLocalHost().getCanonicalHostName();
                }
            } catch (UnknownHostException e) {
                LOG.error("Could not identify hostname.");
                throw new RuntimeException("Could not identify hostname.", e);
            }

            // Initialize the collector write strategy
            super.init();

            KafkaMetricsConfig metricsConfig = new KafkaMetricsConfig(props);
            timeoutSeconds = props.getInt(METRICS_POST_TIMEOUT_SECONDS, DEFAULT_POST_TIMEOUT_SECONDS);
            int metricsSendInterval = props.getInt(TIMELINE_METRICS_SEND_INTERVAL_PROPERTY, MAX_EVICTION_TIME_MILLIS);
            int maxRowCacheSize = props.getInt(TIMELINE_METRICS_MAX_ROW_CACHE_SIZE_PROPERTY, MAX_RECS_PER_NAME_DEFAULT);

            zookeeperQuorum = props.containsKey(COLLECTOR_ZOOKEEPER_QUORUM)
                    ? props.getString(COLLECTOR_ZOOKEEPER_QUORUM)
                    : props.getString("zookeeper.connect");

            metricCollectorPort = props.getString(TIMELINE_PORT_PROPERTY, TIMELINE_DEFAULT_PORT);
            collectorHosts = parseHostsStringIntoCollection(props.getString(TIMELINE_HOSTS_PROPERTY, TIMELINE_DEFAULT_HOST));
            metricCollectorProtocol = props.getString(TIMELINE_PROTOCOL_PROPERTY, TIMELINE_DEFAULT_PROTOCOL);

            instanceId = props.getString(TIMELINE_METRICS_KAFKA_INSTANCE_ID_PROPERTY, null);
            setInstanceId = props.getBoolean(TIMELINE_METRICS_KAFKA_SET_INSTANCE_ID_PROPERTY, false);

            hostInMemoryAggregationEnabled = props.getBoolean(TIMELINE_METRICS_KAFKA_HOST_IN_MEMORY_AGGREGATION_ENABLED_PROPERTY, false);
            hostInMemoryAggregationPort = props.getInt(TIMELINE_METRICS_KAFKA_HOST_IN_MEMORY_AGGREGATION_PORT_PROPERTY, 61888);
            hostInMemoryAggregationProtocol = props.getString(TIMELINE_METRICS_KAFKA_HOST_IN_MEMORY_AGGREGATION_PROTOCOL_PROPERTY, "http");

            setMetricsCache(new TimelineMetricsCache(maxRowCacheSize, metricsSendInterval));

            if (metricCollectorProtocol.contains("https") || hostInMemoryAggregationProtocol.contains("https")) {
                String trustStorePath = props.getString(TIMELINE_METRICS_SSL_KEYSTORE_PATH_PROPERTY).trim();
                String trustStoreType = props.getString(TIMELINE_METRICS_SSL_KEYSTORE_TYPE_PROPERTY).trim();
                String trustStorePwd = props.getString(TIMELINE_METRICS_SSL_KEYSTORE_PASSWORD_PROPERTY).trim();
                loadTruststore(trustStorePath, trustStoreType, trustStorePwd);
            }

            // Exclusion policy
            String excludedMetricsStr = props.getString(EXCLUDED_METRICS_PROPERTY, "");
            if (!StringUtils.isEmpty(excludedMetricsStr.trim())) {
                excludedMetricsPrefixes = excludedMetricsStr.trim().split(",");
            }

            // Inclusion override
            String includedMetricsStr = props.getString(INCLUDED_METRICS_PROPERTY, "");
            if (!StringUtils.isEmpty(includedMetricsStr.trim())) {
                includedMetricsPrefixes = includedMetricsStr.trim().split(",");
            }

            // Inclusion override (regex)
            String includedMetricsRegexStr = props.getString(INCLUDED_METRICS_REGEX_PROPERTY, "");
            if (!StringUtils.isEmpty(includedMetricsRegexStr.trim())) {
                LOG.info("Including metrics which match the following regex patterns : " + includedMetricsRegexStr);
                includedMetricsRegex = includedMetricsRegexStr.trim().split(",");
            }

            initializeReporter();

            if (props.getBoolean(TIMELINE_REPORTER_ENABLED_PROPERTY, false)) {
                startReporter(metricsConfig.pollingIntervalSecs());
            }

            if (LOG.isDebugEnabled()) {
                LOG.debug("MetricsSendInterval = " + metricsSendInterval);
                LOG.debug("MaxRowCacheSize = " + maxRowCacheSize);
                LOG.debug("Excluded metrics prefixes = " + excludedMetricsStr);
                LOG.debug("Included metrics prefixes = " + includedMetricsStr);
            }
        }
    }
}
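Across all five examples the role of KafkaMetricsConfig is the same: it is constructed from the VerifiableProperties handed to init(), and its pollingIntervalSecs() value is passed to startReporter(), so each reporter polls at the metrics polling interval configured for the broker.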