com.codahale.metrics.MetricFilter Scala Examples
The following examples show how to use com.codahale.metrics.MetricFilter, the single-method interface that Dropwizard Metrics reporters and registry queries use to decide which metrics to include.
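As a quick orientation before the project examples, here is a minimal self-contained sketch of the interface (registry contents and metric names are invented): the library ships MetricFilter.ALL plus the startsWith, endsWith, and contains factories used in the examples below; anything more specific is an anonymous implementation of matches.

import com.codahale.metrics.{Metric, MetricFilter, MetricRegistry}

object MetricFilterBasics extends App {
  val registry = new MetricRegistry()
  registry.counter("jvm.requests.total")
  registry.counter("app.errors.total")

  // Built-in factory filter: keep only metrics whose name starts with "jvm"
  val jvmOnly: MetricFilter = MetricFilter.startsWith("jvm")

  // A custom filter is a single method: metric name and instance in, Boolean out
  val errorsOnly: MetricFilter = new MetricFilter {
    override def matches(name: String, metric: Metric): Boolean = name.contains("errors")
  }

  println(registry.getCounters(jvmOnly).keySet())    // prints [jvm.requests.total]
  println(registry.getCounters(errorsOnly).keySet()) // prints [app.errors.total]
}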
Example 1
Source File: package.scala From zio-metrics with Apache License 2.0
package zio.metrics.dropwizard

import zio.{ Has, Layer, Task, ZLayer }
import java.io.File
import java.net.InetSocketAddress
import java.util.Locale
import java.util.concurrent.TimeUnit
import java.{ util => ju }
import org.slf4j.LoggerFactory

package object reporters {

  import com.codahale.metrics.MetricRegistry
  import com.codahale.metrics.MetricFilter
  import com.codahale.metrics.graphite.Graphite
  import com.codahale.metrics.graphite.GraphiteReporter
  import com.codahale.metrics.ConsoleReporter
  import com.codahale.metrics.Slf4jReporter
  import com.codahale.metrics.CsvReporter
  import com.codahale.metrics.jmx.JmxReporter
  import com.codahale.metrics.Reporter

  type Reporters = Has[Reporters.Service]

  object Reporters {
    trait Service {
      def jmx(r: MetricRegistry): Task[JmxReporter]
      def console(r: MetricRegistry): Task[ConsoleReporter]
      def slf4j(r: MetricRegistry, duration: Int, unit: TimeUnit, loggerName: String): Task[Slf4jReporter]
      def csv(r: MetricRegistry, file: File, locale: Locale): Task[Reporter]
      def graphite(r: MetricRegistry, host: String, port: Int, prefix: String): Task[GraphiteReporter]
    }

    val live: Layer[Nothing, Reporters] = ZLayer.succeed(new Service {

      def jmx(r: MetricRegistry): zio.Task[JmxReporter] =
        Task(JmxReporter.forRegistry(r).build())

      def console(r: MetricRegistry): Task[ConsoleReporter] =
        Task(
          ConsoleReporter
            .forRegistry(r)
            .convertRatesTo(TimeUnit.SECONDS)
            .convertDurationsTo(TimeUnit.MILLISECONDS)
            .build()
        )

      def slf4j(r: MetricRegistry, duration: Int, unit: TimeUnit, loggerName: String): Task[Slf4jReporter] =
        Task(
          Slf4jReporter
            .forRegistry(r)
            .outputTo(LoggerFactory.getLogger(loggerName))
            .convertRatesTo(TimeUnit.SECONDS)
            .convertDurationsTo(TimeUnit.MILLISECONDS)
            .build()
        )

      def csv(r: MetricRegistry, file: File, locale: ju.Locale): zio.Task[Reporter] =
        Task(
          CsvReporter
            .forRegistry(r)
            .formatFor(locale)
            .convertRatesTo(TimeUnit.SECONDS)
            .convertDurationsTo(TimeUnit.MILLISECONDS)
            .build(file)
        )

      def graphite(r: MetricRegistry, host: String, port: Int, prefix: String): zio.Task[GraphiteReporter] =
        Task {
          val graphite = new Graphite(new InetSocketAddress(host, port))
          GraphiteReporter
            .forRegistry(r)
            .prefixedWith(prefix)
            .convertRatesTo(TimeUnit.SECONDS)
            .convertDurationsTo(TimeUnit.MILLISECONDS)
            .filter(MetricFilter.ALL) // report every metric; a custom filter would restrict output
            .build(graphite)
        }
    })
  }
}
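A hypothetical way to consume the layer above (assumes ZIO 1.x, matching the Has/ZLayer style of the example; the program value and reporting period are invented):

import java.util.concurrent.TimeUnit
import zio.{ Task, ZIO }
import zio.metrics.dropwizard.reporters._
import com.codahale.metrics.MetricRegistry

val program: ZIO[Reporters, Throwable, Unit] =
  for {
    registry <- Task(new MetricRegistry())
    reporter <- ZIO.accessM[Reporters](_.get.console(registry))
    _        <- Task(reporter.start(10, TimeUnit.SECONDS))
  } yield ()

// The live layer satisfies the Reporters requirement:
// zio.Runtime.default.unsafeRun(program.provideLayer(Reporters.live))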
Example 2
Source File: package.scala From zio-metrics with Apache License 2.0
package zio.metrics

import zio.{ Has, ZLayer }
import zio.{ Ref, Task, UIO }

package object dropwizard {

  import com.codahale.metrics.{ MetricFilter, MetricRegistry }
  import com.codahale.metrics.{ Counter => DWCounter, Gauge => DWGauge }
  import com.codahale.metrics.{ Histogram => DWHistogram, Timer => DWTimer }
  import com.codahale.metrics.{ Meter => DWMeter }
  import com.codahale.metrics.MetricRegistry.MetricSupplier
  import com.codahale.metrics.Reservoir

  type Registry          = Has[Registry.Service]
  type HasMetricRegistry = Has[Option[MetricRegistry]]

  object Registry {
    trait Service {
      def getCurrent(): UIO[MetricRegistry]
      def registerCounter[L: Show](label: Label[L]): Task[DWCounter]
      def registerGauge[L: Show, A](label: Label[L], f: () => A): Task[DWGauge[A]]
      def registerHistogram[L: Show](label: Label[L], reservoir: Reservoir): Task[DWHistogram]
      def registerMeter[L: Show](label: Label[L]): Task[DWMeter]
      def registerTimer[L: Show](label: Label[L]): Task[DWTimer]
    }

    private def label2Name[L: Show](label: Label[L]): String =
      MetricRegistry.name(Show[L].show(label.name), label.labels: _*)

    val explicit: ZLayer[HasMetricRegistry, Nothing, Registry] =
      ZLayer.fromFunction[HasMetricRegistry, Registry.Service](optionalRegistry =>
        new Service {
          private val registryRef: UIO[Ref[MetricRegistry]] = {
            val registry = optionalRegistry.get
            Ref.make(registry.getOrElse(new MetricRegistry()))
          }

          def getCurrent(): UIO[MetricRegistry] = registryRef >>= (_.get)

          def registerCounter[L: Show](label: Label[L]): Task[DWCounter] =
            registryRef >>= (_.modify(r => {
              val name = label2Name(label)
              (r.counter(name), r)
            }))

          def registerGauge[L: Show, A](label: Label[L], f: () => A): Task[DWGauge[A]] =
            registryRef >>= (_.modify(r => {
              val name = label2Name(label)
              // Reuse an already-registered gauge with this name prefix, if any
              val gauges = r.getGauges(MetricFilter.startsWith(name))
              val dwgauge = if (gauges.isEmpty()) {
                val gw = new DWGauge[A]() {
                  def getValue(): A = f()
                }
                gw.asInstanceOf[DWGauge[A]]
              } else gauges.get(gauges.firstKey()).asInstanceOf[DWGauge[A]]
              (r.register(name, dwgauge), r)
            }))

          def registerHistogram[L: Show](label: Label[L], reservoir: Reservoir): Task[DWHistogram] =
            registryRef >>= (_.modify(r => {
              val name = label2Name(label)
              val supplier = new MetricSupplier[DWHistogram] {
                def newMetric(): DWHistogram = new DWHistogram(reservoir)
              }
              (r.histogram(name, supplier), r)
            }))

          def registerTimer[L: Show](label: Label[L]): Task[DWTimer] =
            registryRef >>= (_.modify(r => {
              val name = label2Name(label)
              (r.timer(name), r)
            }))

          def registerMeter[L: Show](label: Label[L]): Task[DWMeter] =
            registryRef >>= (_.modify(r => {
              val name = label2Name(label)
              (r.meter(name), r)
            }))
        }
      )

    val live: ZLayer[Any, Nothing, Registry] =
      ZLayer.succeed[Option[MetricRegistry]](None) >>> explicit

    // Turn an optional pattern into a MetricFilter: "+s" matches names starting
    // with s, "-s" matches names ending with s, any other string matches names
    // containing it, and None matches everything.
    def makeFilter(filter: Option[String]): MetricFilter = filter match {
      case Some(s) =>
        s.charAt(0) match {
          case '+' => MetricFilter.startsWith(s.substring(1))
          case '-' => MetricFilter.endsWith(s.substring(1))
          case _   => MetricFilter.contains(s)
        }
      case _ => MetricFilter.ALL
    }
  }
}
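The makeFilter helper at the end of this example encodes a small convention for turning an optional pattern string into a filter; a short illustration (pattern strings invented):

import com.codahale.metrics.MetricFilter
import zio.metrics.dropwizard.Registry

val byPrefix: MetricFilter   = Registry.makeFilter(Some("+server")) // startsWith("server")
val bySuffix: MetricFilter   = Registry.makeFilter(Some("-lag"))    // endsWith("lag")
val bySubstr: MetricFilter   = Registry.makeFilter(Some("http"))    // contains("http")
val everything: MetricFilter = Registry.makeFilter(None)            // MetricFilter.ALL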
Example 3
Source File: StatsDSink.scala From spark-statsd with Apache License 2.0
package org.apache.spark.metrics.sink

import java.util.Properties
import java.util.concurrent.TimeUnit

import com.codahale.metrics.{MetricRegistry, MetricFilter}
import com.readytalk.metrics.StatsDReporter
import org.apache.spark.SecurityManager
import org.apache.spark.metrics.MetricsSystem

private[spark] class StatsDSink(val property: Properties,
                                val registry: MetricRegistry,
                                securityMgr: SecurityManager) extends Sink {

  val STATSD_DEFAULT_PERIOD = 10
  val STATSD_DEFAULT_UNIT = "SECONDS"
  val STATSD_DEFAULT_PREFIX = ""

  val STATSD_KEY_HOST = "host"
  val STATSD_KEY_PORT = "port"
  val STATSD_KEY_PERIOD = "period"
  val STATSD_KEY_UNIT = "unit"
  val STATSD_KEY_PREFIX = "prefix"

  def propertyToOption(prop: String): Option[String] = Option(property.getProperty(prop))

  if (!propertyToOption(STATSD_KEY_HOST).isDefined) {
    throw new Exception("StatsD sink requires 'host' property.")
  }
  if (!propertyToOption(STATSD_KEY_PORT).isDefined) {
    throw new Exception("StatsD sink requires 'port' property.")
  }

  val host = propertyToOption(STATSD_KEY_HOST).get
  val port = propertyToOption(STATSD_KEY_PORT).get.toInt

  val pollPeriod = propertyToOption(STATSD_KEY_PERIOD) match {
    case Some(s) => s.toInt
    case None => STATSD_DEFAULT_PERIOD
  }

  val pollUnit: TimeUnit = propertyToOption(STATSD_KEY_UNIT) match {
    case Some(s) => TimeUnit.valueOf(s.toUpperCase())
    case None => TimeUnit.valueOf(STATSD_DEFAULT_UNIT)
  }

  val prefix = propertyToOption(STATSD_KEY_PREFIX).getOrElse(STATSD_DEFAULT_PREFIX)

  MetricsSystem.checkMinimalPollingPeriod(pollUnit, pollPeriod)

  val reporter = StatsDReporter.forRegistry(registry)
    .prefixedWith(prefix)
    .convertDurationsTo(TimeUnit.MILLISECONDS)
    .convertRatesTo(TimeUnit.SECONDS)
    .filter(MetricFilter.ALL)
    .build(host, port)

  override def start() {
    reporter.start(pollPeriod, pollUnit)
  }

  override def stop() {
    reporter.stop()
  }

  override def report() {
    reporter.report()
  }
}
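Spark instantiates sinks like this one reflectively from metrics.properties, where the class and these keys would appear under a sink namespace such as *.sink.statsd.* (the sink name is illustrative). The keys the class actually reads, with their defaults, as a plain Properties sketch:

import java.util.Properties

// Keys read by StatsDSink above; all values are illustrative.
val props = new Properties()
props.setProperty("host", "127.0.0.1") // required, the sink throws without it
props.setProperty("port", "8125")      // required, parsed with .toInt
props.setProperty("period", "10")      // optional, defaults to 10
props.setProperty("unit", "seconds")   // optional, parsed via TimeUnit.valueOf(_.toUpperCase)
props.setProperty("prefix", "spark")   // optional, defaults to ""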
Example 4
Source File: InfluxDbReporter.scala From flink-stuff with Apache License 2.0
package com.jgrier.flinkstuff.metrics

import java.util.concurrent.TimeUnit

import com.codahale.metrics.{MetricFilter, ScheduledReporter}
import metrics_influxdb.{HttpInfluxdbProtocol, InfluxdbReporter}
import metrics_influxdb.api.measurements.CategoriesMetricMeasurementTransformer
import org.apache.flink.dropwizard.ScheduledDropwizardReporter
import org.apache.flink.metrics.MetricConfig

class InfluxDbReporter extends ScheduledDropwizardReporter {
  override def getReporter(metricConfig: MetricConfig): ScheduledReporter = {
    val server = metricConfig.getString("server", "localhost")
    val port = metricConfig.getInteger("port", 8086)
    val user = metricConfig.getString("user", "admin")
    val password = metricConfig.getString("password", "admin")
    val db = metricConfig.getString("db", "flink")

    InfluxdbReporter.forRegistry(registry)
      .protocol(new HttpInfluxdbProtocol(server, port, user, password, db))
      .convertRatesTo(TimeUnit.SECONDS)
      .convertDurationsTo(TimeUnit.MILLISECONDS)
      .filter(MetricFilter.ALL)
      .skipIdleMetrics(false)
      .transformer(new CategoriesMetricMeasurementTransformer("host", "process_type", "tm_id", "job_name", "task_name", "subtask_index"))
      .build()
  }
}
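In a real deployment Flink constructs and schedules this reporter itself from its configuration, supplying the server, port, user, password, and db keys read above. Purely to show which keys are consumed and their defaults, here is a direct-call sketch; it bypasses Flink's reporter lifecycle and assumes MetricConfig's java.util.Properties heritage (all values invented):

import org.apache.flink.metrics.MetricConfig

val cfg = new MetricConfig() // MetricConfig extends java.util.Properties
cfg.setProperty("server", "influx.example.com") // default "localhost"
cfg.setProperty("port", "8086")                 // default 8086
cfg.setProperty("user", "flink")                // default "admin"
cfg.setProperty("password", "secret")           // default "admin"
cfg.setProperty("db", "flink")                  // default "flink"

val reporter = new InfluxDbReporter().getReporter(cfg)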
Example 5
Source File: RemoraDatadogReporter.scala From remora with MIT License
package reporter

import java.util.concurrent.TimeUnit

import com.codahale.metrics.{Metric, MetricFilter, MetricRegistry}
import config.DataDog
import models.RegistryKafkaMetric
import org.coursera.metrics.datadog.TaggedName.TaggedNameBuilder
import org.coursera.metrics.datadog.transport.UdpTransport
import org.coursera.metrics.datadog.{DatadogReporter, MetricNameFormatter}

class RemoraDatadogReporter(metricRegistry: MetricRegistry, datadogConfig: DataDog) {

  private val transport = new UdpTransport.Builder()
    .withStatsdHost(datadogConfig.agentHost)
    .withPort(datadogConfig.agentPort)
    .build

  // Only report metrics for tracked consumer groups; an empty list tracks everything.
  private val kafkaConsumerGroupFilter: MetricFilter = new MetricFilter {
    override def matches(metricName: String, metric: Metric): Boolean = {
      val trackedConsumerGroups = datadogConfig.trackedConsumerGroups
      trackedConsumerGroups.isEmpty ||
        trackedConsumerGroups.exists(consumerGroupName => metricName.contains(consumerGroupName))
    }
  }

  private def metricNameFormatter(removeTagsFromMetricName: Boolean): MetricNameFormatter =
    new MetricNameFormatter {
      override def format(nameWithPrefix: String, path: String*): String = {
        RegistryKafkaMetric.decode(nameWithPrefix.replaceFirst(s"${datadogConfig.name}\\.", "")) match {
          case Some(registryKafkaMetric) =>
            val builder = new TaggedNameBuilder()
              .metricName(
                if (removeTagsFromMetricName) buildNameWithoutTags(registryKafkaMetric)
                else nameWithPrefix
              )
              .addTag("topic", registryKafkaMetric.topic)
              .addTag("group", registryKafkaMetric.group)
            registryKafkaMetric.partition.foreach(p => builder.addTag("partition", p))
            builder.build().encode()
          case None => nameWithPrefix
        }
      }
    }

  private def buildNameWithoutTags(registryKafkaMetric: RegistryKafkaMetric): String =
    s"${datadogConfig.name}.${registryKafkaMetric.prefix}.${registryKafkaMetric.suffix}"

  def startReporter(): Unit = DatadogReporter
    .forRegistry(metricRegistry)
    .withPrefix(datadogConfig.name)
    .withTransport(transport)
    .filter(kafkaConsumerGroupFilter)
    .withMetricNameFormatter(metricNameFormatter(datadogConfig.removeTagsFromMetricName))
    .build
    .start(datadogConfig.intervalMinutes, TimeUnit.MINUTES)
}
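A hypothetical wiring of this reporter (the DataDog constructor arguments mirror the spec in Example 6; the name, host, port, and consumer group are invented):

import com.codahale.metrics.MetricRegistry
import config.DataDog
import reporter.RemoraDatadogReporter

val registry = new MetricRegistry()
// enabled, name, interval (minutes), agent host, agent port, tracked groups, tag flag
val datadogConfig =
  DataDog(enabled = true, "remora", 1, "localhost", 8125, List("my-consumer-group"),
    removeTagsFromMetricName = false)

new RemoraDatadogReporter(registry, datadogConfig).startReporter()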
Example 6
Source File: RemoraDatadogReporterSpec.scala From remora with MIT License
package reporter

import com.codahale.metrics.{Metric, MetricFilter, MetricRegistry}
import config.DataDog
import org.coursera.metrics.datadog.MetricNameFormatter
import org.scalamock.scalatest.MockFactory
import org.scalatest.{FlatSpec, Matchers, PrivateMethodTester}

class RemoraDatadogReporterSpec extends FlatSpec with Matchers with PrivateMethodTester with MockFactory {

  private val metricRegistry: MetricRegistry = new MetricRegistry
  private val metric: Metric = mock[Metric]
  private val config = DataDog(enabled = true, "test", 1, "localhost", 8125, List.empty, removeTagsFromMetricName = false)
  private val configRemoveTags = DataDog(enabled = true, "test", 1, "localhost", 8125, List.empty, removeTagsFromMetricName = true)

  "Metrics filter" should "match any metric when no filter is given" in {
    val filter = buildMetricFilter(List.empty)
    filter.matches("any_metrics_name", metric) should be(true)
  }

  it should "match metric containing consumer group name" in {
    val kafkaConsumerGroupName = "test-consumer1"
    val filter = buildMetricFilter(List(kafkaConsumerGroupName))
    filter.matches(s"metric-name-$kafkaConsumerGroupName", metric) should be(true)
  }

  it should "not match metric containing consumer group name" in {
    val filter = buildMetricFilter(List("test-consumer1"))
    filter.matches("some-metrics", metric) should be(false)
  }

  "Metric name formatter" should "add tag information if metric is well formatted" in {
    val formatter = getMetricNameFormatter(config)
    formatter.format(s"${config.name}.gauge.test.1.test-consumer.lag") should be(
      s"${config.name}.gauge.test.1.test-consumer.lag[topic:test,group:test-consumer,partition:1]")
  }

  it should "not add partition tag information if no partition" in {
    val formatter = getMetricNameFormatter(config)
    formatter.format(s"${config.name}.gauge.test-topic.test-consumer.totalLag") should be(
      s"${config.name}.gauge.test-topic.test-consumer.totalLag[topic:test-topic,group:test-consumer]")
  }

  it should "not add tag information otherwise" in {
    val formatter = getMetricNameFormatter(config)
    formatter.format(s"${config.name}.gauge.test_1_faulty_test-consumer__lag") should be(
      s"${config.name}.gauge.test_1_faulty_test-consumer__lag")
  }

  "Metric name formatter without tags" should "add tag information if metric is well formatted" in {
    val formatter = getMetricNameFormatter(configRemoveTags)
    formatter.format(s"${configRemoveTags.name}.gauge.test.1.test-consumer.lag") should be(
      s"${configRemoveTags.name}.gauge.lag[topic:test,group:test-consumer,partition:1]")
  }

  it should "not add partition tag information if no partition" in {
    val formatter = getMetricNameFormatter(configRemoveTags)
    formatter.format(s"${configRemoveTags.name}.gauge.test-topic.test-consumer.totalLag") should be(
      s"${configRemoveTags.name}.gauge.totalLag[topic:test-topic,group:test-consumer]")
  }

  private def buildMetricFilter(kafkaConsumerList: List[String], removeTags: Boolean = false): MetricFilter = {
    val config = DataDog(enabled = true, "test", 1, "localhost", 8125, kafkaConsumerList, removeTags)
    val reporter = new RemoraDatadogReporter(metricRegistry, config)
    reporter invokePrivate PrivateMethod[MetricFilter]('kafkaConsumerGroupFilter)()
  }

  private def getMetricNameFormatter(config: DataDog): MetricNameFormatter = {
    val reporter = new RemoraDatadogReporter(metricRegistry, config)
    reporter invokePrivate PrivateMethod[MetricNameFormatter]('metricNameFormatter)(config.removeTagsFromMetricName)
  }
}
Example 7
Source File: APIMetrics.scala From vinyldns with Apache License 2.0
package vinyldns.api.metrics

import java.util.concurrent.TimeUnit

import cats.effect.{Blocker, ContextShift, IO}
import com.codahale.metrics.Slf4jReporter.LoggingLevel
import com.codahale.metrics.{Metric, MetricFilter, ScheduledReporter, Slf4jReporter}
import com.typesafe.config.Config
import org.slf4j.LoggerFactory
import pureconfig._
import pureconfig.generic.auto._
import pureconfig.module.catseffect.syntax._
import vinyldns.core.VinylDNSMetrics

final case class MemoryMetricsSettings(logEnabled: Boolean, logSeconds: Int)
final case class APIMetricsSettings(memory: MemoryMetricsSettings)

object APIMetrics {
  private implicit val cs: ContextShift[IO] =
    IO.contextShift(scala.concurrent.ExecutionContext.global)

  // Output all memory metrics to the log, do not start unless configured
  private val logReporter = Slf4jReporter
    .forRegistry(VinylDNSMetrics.metricsRegistry)
    .filter(new MetricFilter {
      def matches(name: String, metric: Metric): Boolean =
        name.startsWith("memory")
    })
    .withLoggingLevel(LoggingLevel.INFO)
    .outputTo(LoggerFactory.getLogger("MemStats"))
    .convertRatesTo(TimeUnit.SECONDS)
    .convertDurationsTo(TimeUnit.MILLISECONDS)
    .build()

  def initialize(
      settings: APIMetricsSettings,
      reporter: ScheduledReporter = logReporter
  ): IO[Unit] = IO {
    if (settings.memory.logEnabled) {
      reporter.start(settings.memory.logSeconds, TimeUnit.SECONDS)
    }
  }

  def loadSettings(config: Config): IO[APIMetricsSettings] =
    Blocker[IO].use(
      ConfigSource.fromConfig(config).loadF[IO, APIMetricsSettings](_)
    )
}
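A sketch of how this might be wired at application startup (the settings values are invented; cats-effect 2 style, matching the imports above):

import cats.effect.IO
import vinyldns.api.metrics.{APIMetrics, APIMetricsSettings, MemoryMetricsSettings}

// Log memory metrics every 60 seconds.
val settings = APIMetricsSettings(MemoryMetricsSettings(logEnabled = true, logSeconds = 60))

// Returns an IO that starts the reporter when run as part of the app's startup sequence:
val init: IO[Unit] = APIMetrics.initialize(settings)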
Example 8
Source File: OffsetGraphiteReporter.scala From kafka-offset-monitor-graphite with Apache License 2.0
package pl.allegro.tech.kafka.offset.monitor.graphite

import java.net.InetSocketAddress
import java.util.concurrent.TimeUnit

import com.codahale.metrics.{Gauge, MetricFilter, MetricRegistry}
import com.codahale.metrics.graphite.{Graphite, GraphiteReporter}
import com.google.common.cache._
import com.quantifind.kafka.OffsetGetter.OffsetInfo

class OffsetGraphiteReporter(pluginsArgs: String) extends com.quantifind.kafka.offsetapp.OffsetInfoReporter {

  GraphiteReporterArguments.parseArguments(pluginsArgs)

  val metrics: MetricRegistry = new MetricRegistry()

  val graphite: Graphite = new Graphite(
    new InetSocketAddress(GraphiteReporterArguments.graphiteHost, GraphiteReporterArguments.graphitePort))

  val reporter: GraphiteReporter = GraphiteReporter.forRegistry(metrics)
    .prefixedWith(GraphiteReporterArguments.graphitePrefix)
    .convertRatesTo(TimeUnit.SECONDS)
    .convertDurationsTo(TimeUnit.MILLISECONDS)
    .filter(MetricFilter.ALL)
    .build(graphite)

  reporter.start(GraphiteReporterArguments.graphiteReportPeriod, TimeUnit.SECONDS)

  // Unregister a key's gauges when its cache entry expires
  val removalListener: RemovalListener[String, GaugesValues] = new RemovalListener[String, GaugesValues] {
    override def onRemoval(removalNotification: RemovalNotification[String, GaugesValues]) = {
      metrics.remove(removalNotification.getKey() + ".offset")
      metrics.remove(removalNotification.getKey() + ".logSize")
      metrics.remove(removalNotification.getKey() + ".lag")
    }
  }

  val gauges: LoadingCache[String, GaugesValues] = CacheBuilder.newBuilder()
    .expireAfterAccess(GraphiteReporterArguments.metricsCacheExpireSeconds, TimeUnit.SECONDS)
    .removalListener(removalListener)
    .build(
      new CacheLoader[String, GaugesValues]() {
        def load(key: String): GaugesValues = {
          val values: GaugesValues = new GaugesValues()

          val offsetGauge: Gauge[Long] = new Gauge[Long] {
            override def getValue: Long = values.offset
          }
          val lagGauge: Gauge[Long] = new Gauge[Long] {
            override def getValue: Long = values.lag
          }
          val logSizeGauge: Gauge[Long] = new Gauge[Long] {
            override def getValue: Long = values.logSize
          }

          metrics.register(key + ".offset", offsetGauge)
          metrics.register(key + ".logSize", logSizeGauge)
          metrics.register(key + ".lag", lagGauge)

          values
        }
      }
    )

  override def report(info: scala.IndexedSeq[OffsetInfo]) = {
    info.foreach(i => {
      val values: GaugesValues = gauges.get(getMetricName(i))
      values.logSize = i.logSize
      values.offset = i.offset
      values.lag = i.lag
    })
  }

  def getMetricName(offsetInfo: OffsetInfo): String = {
    offsetInfo.topic.replace(".", "_") + "." + offsetInfo.group.replace(".", "_") + "." + offsetInfo.partition
  }
}
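For reference, the naming scheme produced by getMetricName above, with invented topic and group names (dots are replaced so they don't create extra Graphite hierarchy levels):

// topic = "my.topic", group = "my.group", partition = 3
val key = "my.topic".replace(".", "_") + "." + "my.group".replace(".", "_") + "." + 3
// key == "my_topic.my_group.3"
// gauges registered: "my_topic.my_group.3.offset", ".logSize", ".lag"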