org.apache.kafka.common.metrics.Sensor Java Examples
The following examples show how to use
org.apache.kafka.common.metrics.Sensor.
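All of the examples below follow the same basic pattern: obtain a Sensor from a Metrics registry, attach one or more statistics to it with add(), and feed it observations with record(). The following minimal sketch illustrates that pattern in isolation; the sensor and group names ("request-latency", "example-group") are illustrative and not taken from any of the projects below.

import org.apache.kafka.common.metrics.Metrics;
import org.apache.kafka.common.metrics.Sensor;
import org.apache.kafka.common.metrics.stats.Avg;
import org.apache.kafka.common.metrics.stats.Max;

public class SensorSketch {
    public static void main(String[] args) {
        // Create a registry, register a named sensor, attach statistics, then record values.
        try (Metrics metrics = new Metrics()) {
            Sensor requestLatency = metrics.sensor("request-latency");
            requestLatency.add(
                metrics.metricName("request-latency-avg", "example-group", "Average request latency in ms"),
                new Avg());
            requestLatency.add(
                metrics.metricName("request-latency-max", "example-group", "Maximum request latency in ms"),
                new Max());

            requestLatency.record(42.0);
            requestLatency.record(7.5);
            // The Avg and Max metrics are now visible through metrics.metrics()
            // and through any registered MetricsReporter (e.g. JmxReporter).
        }
    }
}

The per-partition and per-task variants in the kafka-monitor and mirus examples below apply the same pattern, parameterizing the sensor name per partition or task.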
Example #1
Source File: DropwizardReporterTest.java, from kafka-dropwizard-reporter (Apache License 2.0)
@Test
public void testMetricChange() throws Exception {
    Metrics metrics = new Metrics();
    DropwizardReporter reporter = new DropwizardReporter();
    reporter.configure(new HashMap<String, Object>());
    metrics.addReporter(reporter);

    Sensor sensor = metrics.sensor("kafka.requests");
    sensor.add(new MetricName("pack.bean1.avg", "grp1"), new Avg());

    Map<String, Gauge> gauges = SharedMetricRegistries.getOrCreate("default").getGauges();
    String expectedName = "org.apache.kafka.common.metrics.grp1.pack.bean1.avg";
    Assert.assertEquals(1, gauges.size());
    Assert.assertEquals(expectedName, gauges.keySet().toArray()[0]);

    sensor.record(2.1);
    sensor.record(2.2);
    sensor.record(2.6);
    Assert.assertEquals(2.3, (Double) gauges.get(expectedName).getValue(), 0.001);
}
Example #2
Source File: ConsumeService.java, from kafka-monitor (Apache License 2.0)
@Override
public synchronized void start() {
    if (_running.compareAndSet(false, true)) {
        _consumeThread.start();
        LOG.info("{}/ConsumeService started.", _name);

        // Register a sensor that publishes the topic's partition count as a Total metric.
        Sensor topicPartitionCount = metrics.sensor("topic-partitions");
        DescribeTopicsResult describeTopicsResult = _adminClient.describeTopics(Collections.singleton(_topic));
        Map<String, KafkaFuture<TopicDescription>> topicResultValues = describeTopicsResult.values();
        KafkaFuture<TopicDescription> topicDescriptionKafkaFuture = topicResultValues.get(_topic);
        TopicDescription topicDescription = null;
        try {
            topicDescription = topicDescriptionKafkaFuture.get();
        } catch (InterruptedException | ExecutionException e) {
            LOG.error("Exception occurred while getting the topicDescriptionKafkaFuture for topic: {}", _topic, e);
        }
        @SuppressWarnings("ConstantConditions")
        double partitionCount = topicDescription.partitions().size();
        topicPartitionCount.add(
            new MetricName("topic-partitions-count", METRIC_GROUP_NAME,
                "The total number of partitions for the topic.", tags),
            new Total(partitionCount));
    }
}
Example #3
Source File: StockPerformanceMetricsTransformer.java, from kafka-streams-in-action (Apache License 2.0)
@Override
@SuppressWarnings("unchecked")
public void init(final ProcessorContext processorContext) {
    keyValueStore = (KeyValueStore) processorContext.getStateStore(stocksStateStore);
    this.processorContext = processorContext;
    this.processorContext.schedule(5000, PunctuationType.WALL_CLOCK_TIME, this::doPunctuate);

    final String tagKey = "task-id";
    final String tagValue = processorContext.taskId().toString();
    final String nodeName = "StockPerformanceProcessor_" + count.getAndIncrement();

    metricsSensor = processorContext.metrics().addLatencyAndThroughputSensor("transformer-node",
        nodeName, "stock-performance-calculation",
        Sensor.RecordingLevel.DEBUG,
        tagKey, tagValue);
}
Example #4
Source File: TaskJmxReporter.java, from mirus (BSD 3-Clause "New" or "Revised" License)
private void ensureMetricsCreated(ConnectorTaskId taskId) {
    Map<String, String> tags = getTaskLevelTags(taskId);
    MetricName taskMetric =
        getMetric(
            FAILED_TASK_ATTEMPTS_METRIC_NAME + "-count",
            TASK_CONNECTOR_JMX_GROUP_NAME,
            "count of restart attempts to a failed task",
            taskLevelJmxTags,
            tags);

    if (!metrics.metrics().containsKey(taskMetric)) {
        Sensor sensor = getSensor(taskId.toString());
        sensor.add(taskMetric, new Total());
        logger.info("Added the task {} to the list of JMX metrics", taskId);
        logger.debug("Updated set of JMX metrics is {}", metrics.metrics());
    }
}
Example #5
Source File: KsqlEngineMetrics.java, from ksql-fork-with-deep-learning-function (Apache License 2.0)
private Sensor configureErrorRate(Metrics metrics) {
    Sensor sensor = createSensor(metrics, metricGroupName + "-error-rate");
    sensor.add(
        metrics.metricName("error-rate", this.metricGroupName,
            "The number of messages which were consumed but not processed. "
            + "Messages may not be processed if, for instance, the message "
            + "contents could not be deserialized due to an incompatible schema. "
            + "Alternately, a consumed message may not have been produced, hence "
            + "being effectively dropped. Such messages would also be counted "
            + "toward the error rate."),
        new Value());
    return sensor;
}
Example #6
Source File: KsqlEngineMetrics.java, from ksql-fork-with-deep-learning-function (Apache License 2.0)
private Sensor configureMessagesOut(Metrics metrics) {
    Sensor sensor = createSensor(metrics, metricGroupName + "-messages-produced");
    sensor.add(
        metrics.metricName("messages-produced-per-sec", this.metricGroupName,
            "The number of messages produced per second across all queries"),
        new Value());
    return sensor;
}
Example #7
Source File: KsqlEngineMetrics.java, from ksql-fork-with-deep-learning-function (Apache License 2.0)
private Sensor configureMessagesIn(Metrics metrics) {
    Sensor sensor = createSensor(metrics, metricGroupName + "-messages-consumed");
    sensor.add(
        metrics.metricName("messages-consumed-per-sec", this.metricGroupName,
            "The number of messages consumed per second across all queries"),
        new Value());
    return sensor;
}
Example #8
Source File: KsqlEngineMetrics.java, from ksql-fork-with-deep-learning-function (Apache License 2.0)
private Sensor configureTotalMessagesIn(Metrics metrics) {
    Sensor sensor = createSensor(metrics, metricGroupName + "-total-messages-consumed");
    sensor.add(
        metrics.metricName("messages-consumed-total", this.metricGroupName,
            "The total number of messages consumed across all queries"),
        new Value());
    return sensor;
}
Example #9
Source File: KsqlEngineMetrics.java, from ksql-fork-with-deep-learning-function (Apache License 2.0)
private Sensor configureTotalBytesIn(Metrics metrics) {
    Sensor sensor = createSensor(metrics, metricGroupName + "-total-bytes-consumed");
    sensor.add(
        metrics.metricName("bytes-consumed-total", this.metricGroupName,
            "The total number of bytes consumed across all queries"),
        new Value());
    return sensor;
}
Example #10
Source File: ProduceMetrics.java, from kafka-monitor (Apache License 2.0)
public void addPartitionSensors(int partition) {
    Sensor recordsProducedSensor = _metrics.sensor("records-produced-partition-" + partition);
    recordsProducedSensor.add(
        new MetricName("records-produced-rate-partition-" + partition,
            XinfraMonitorConstants.METRIC_GROUP_NAME_PRODUCE_SERVICE,
            "The average number of records per second that are produced to this partition", _tags),
        new Rate());
    _recordsProducedPerPartition.put(partition, recordsProducedSensor);

    Sensor errorsSensor = _metrics.sensor("produce-error-partition-" + partition);
    errorsSensor.add(
        new MetricName("produce-error-rate-partition-" + partition,
            XinfraMonitorConstants.METRIC_GROUP_NAME_PRODUCE_SERVICE,
            "The average number of errors per second when producing to this partition", _tags),
        new Rate());
    _produceErrorPerPartition.put(partition, errorsSensor);
}
Example #11
Source File: KsqlEngineMetrics.java, from ksql-fork-with-deep-learning-function (Apache License 2.0)
private Sensor configureMessageConsumptionByQuerySensor(Metrics metrics) {
    Sensor sensor = createSensor(metrics, "message-consumption-by-query");
    sensor.add(metrics.metricName("messages-consumed-max", this.metricGroupName), new Max());
    sensor.add(metrics.metricName("messages-consumed-min", this.metricGroupName), new Min());
    sensor.add(metrics.metricName("messages-consumed-avg", this.metricGroupName), new Avg());
    return sensor;
}
Example #12
Source File: MissingPartitionsJmxReporter.java, from mirus (BSD 3-Clause "New" or "Revised" License)
MissingPartitionsJmxReporter(Metrics metrics) {
    super(metrics);
    Sensor missingPartsSensor = metrics.sensor(MISSING_DEST_PARTITIONS);
    MetricName missingPartsName = metrics.metricName(MISSING_DEST_PARTITIONS + "-count", "mirus");
    missingPartsSensor.add(missingPartsName, new Value());
    this.missingPartsSensor = missingPartsSensor;
}
Example #13
Source File: TaskJmxReporter.java, from mirus (BSD 3-Clause "New" or "Revised" License)
private void updateTaskMetrics(ConnectorTaskId taskId, TaskState taskStatus) {
    if (taskStatus.state().equalsIgnoreCase(TaskStatus.State.FAILED.toString())) {
        Sensor sensor = getSensor(taskId.toString());
        sensor.record(1, Time.SYSTEM.milliseconds());
    }
}
Example #14
Source File: KsqlEngineMetrics.java, from ksql-fork-with-deep-learning-function (Apache License 2.0)
private Sensor createSensor(Metrics metrics, String sensorName) {
    Sensor sensor = metrics.sensor(sensorName);
    sensors.add(sensor);
    return sensor;
}
Example #15
Source File: TaskJmxReporter.java, from mirus (BSD 3-Clause "New" or "Revised" License)
private Sensor getSensor(String taskId) {
    return metrics.sensor(taskId);
}
Example #16
Source File: TopicSensors.java, from ksql-fork-with-deep-learning-function (Apache License 2.0)
SensorMetric(Sensor sensor, KafkaMetric metric, Time time, boolean errorMetric) {
    this.sensor = sensor;
    this.metric = metric;
    this.time = time;
    this.errorMetric = errorMetric;
}
Example #17
Source File: WorkersMetrics.java, from kafka-workers (Apache License 2.0)
public void addSensor(String name) {
    Sensor sensor = metrics.sensor(name);
    sensor.add(metrics.metricName("value", name), new Value());
}
Example #18
Source File: KsqlEngineMetrics.java, from ksql-fork-with-deep-learning-function (Apache License 2.0)
private Sensor configureIdleQueriesSensor(Metrics metrics) {
    Sensor sensor = createSensor(metrics, "num-idle-queries");
    sensor.add(metrics.metricName("num-idle-queries", this.metricGroupName), new Value());
    return sensor;
}
Example #19
Source File: KsqlEngineMetrics.java, from ksql-fork-with-deep-learning-function (Apache License 2.0)
List<Sensor> registeredSensors() {
    return sensors;
}