com.codahale.metrics.JmxReporter Scala Examples

The following examples show how to use com.codahale.metrics.JmxReporter, drawn from several open-source Scala projects. Each example lists its source file, project, and license.
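Before the project-specific examples, here is a minimal, self-contained sketch of the basic JmxReporter lifecycle: create a MetricRegistry, build a reporter for it (optionally scoping its MBeans to a custom JMX domain), start it, and stop it on shutdown. The object name, domain, and metric name below are illustrative only and do not come from any of the projects.

import com.codahale.metrics.{ JmxReporter, MetricRegistry }

object JmxReporterSketch {
  def main(args: Array[String]): Unit = {
    val registry = new MetricRegistry()

    // Publish every metric in the registry as a JMX MBean under the "example" domain.
    val reporter = JmxReporter.forRegistry(registry)
      .inDomain("example")
      .build()
    reporter.start()

    // Metrics registered afterwards also show up in JMX tools such as jconsole.
    registry.counter("requests.total").inc()

    // Stop the reporter (unregistering its MBeans) when the application shuts down.
    sys.addShutdownHook(reporter.stop())
  }
}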
Example 1
Source File: MetricsFactory.scala    From rokku   with Apache License 2.0
package com.ing.wbaa.rokku.proxy.metrics

import akka.http.scaladsl.model.{ HttpMethod, HttpMethods }
import com.codahale.metrics.{ JmxReporter, MetricRegistry }

object MetricsFactory {

  val ALL_REQUEST = "requests.status.all.total"
  val SUCCESS_REQUEST = "requests.status.success.total"
  val FAILURE_REQUEST = "requests.status.failure.total"
  val UNAUTHENTICATED_REQUEST = "requests.status.unauthenticated.total"
  val REQUEST_TIME = "requests.nanoseconds.total"
  val REQUEST_TIME_HIST = "requests.time.histogram"
  val HTTP_METHOD = "{httpMethod}"
  val HTTP_DIRECTION = "{InOut}"
  val REQUEST_CONTEXT_LENGTH = s"requests.method.$HTTP_METHOD.$HTTP_DIRECTION.context.length.bytes"
  val REQUEST_CONTEXT_LENGTH_SUM = s"requests.method.$HTTP_METHOD.$HTTP_DIRECTION.context.length.bytes.total"
  val REQUEST_QUEUE_OCCUPIED = "request.queue.occupied"
  val REQUEST_USER = "{user}"
  val REQUEST_QUEUE_OCCUPIED_BY_USER = s"request.queue.occupied.by.$REQUEST_USER"
  val ERROR_REPORTED_TOTAL = "errors.reported.total"
  val OBJECTS_UPLOAD_OPERATIONS_TOTAL = s"requests.method.$HTTP_METHOD.operations.total"
  val KAFKA_SENT_NOTIFICATION_TOTAL = "requests.kafka.notification.sent.total"
  val KAFKA_SENT_NOTIFICATION_ERROR_TOTAL = "requests.kafka.notification.sent.errors.total"

  private[this] val metrics = new MetricRegistry()

  // Expose all metrics in this registry as JMX MBeans under the "rokku" domain
  JmxReporter.forRegistry(metrics).inDomain("rokku").build.start()

  def registryMetrics(): MetricRegistry = metrics

  def countRequest(name: String, count: Long = 1, countAll: Boolean = true): Unit = {
    metrics.counter(name).inc(count)
    if (countAll) metrics.counter(ALL_REQUEST).inc()
  }

  def markRequestTime(time: Long): Unit = {
    metrics.counter(REQUEST_TIME).inc(time)
    metrics.histogram(REQUEST_TIME_HIST).update(time)
  }

  def incrementRequestQueue(name: String): Unit = {
    metrics.counter(name).inc()
    metrics.counter(REQUEST_QUEUE_OCCUPIED).inc()
  }

  def decrementRequestQueue(name: String): Unit = {
    metrics.counter(name).dec()
    metrics.counter(REQUEST_QUEUE_OCCUPIED).dec()
  }

  def countLogErrors(name: String): Unit = {
    metrics.counter(name).inc()
  }

  def incrementObjectsUploaded(requestMethodName: HttpMethod): Unit = {
    requestMethodName match {
      case HttpMethods.PUT | HttpMethods.POST =>
        metrics.counter(OBJECTS_UPLOAD_OPERATIONS_TOTAL.replace(MetricsFactory.HTTP_METHOD, requestMethodName.value)).inc()
      case _ =>
    }
  }

  def incrementKafkaNotificationsSent(requestMethodName: HttpMethod): Unit = {
    requestMethodName match {
      case HttpMethods.PUT | HttpMethods.POST => metrics.counter(KAFKA_SENT_NOTIFICATION_TOTAL).inc()
      case _                                  =>
    }
  }

  def incrementKafkaSendErrors(): Unit = {
    metrics.counter(KAFKA_SENT_NOTIFICATION_ERROR_TOTAL).inc()
  }
} 
Example 2
Source File: MetricRegistryFactory.scala    From money   with Apache License 2.0
package com.comcast.money.core.metrics

import com.codahale.metrics.{ JmxReporter, MetricRegistry }
import com.typesafe.config.Config
import org.slf4j.LoggerFactory


object MetricRegistryFactory {

  private val logger = LoggerFactory.getLogger("com.comcast.money.core.metrics.MetricRegistryFactory")

  def metricRegistry(config: Config): MetricRegistry = {
    try {
      val realFactory =
        if (config.hasPath("metrics-registry.class-name"))
          Class.forName(config.getString("metrics-registry.class-name")).newInstance.asInstanceOf[MetricRegistryFactory]
        else
          new DefaultMetricRegistryFactory()

      // Ask the custom factory for a MetricRegistry, and pass in our configuration so that an implementation
      // can add its own settings to application.conf, too.
      realFactory.metricRegistry(config)
    } catch {
      case e: Throwable =>
        logger.error("Unable to create actual factory instance", e)
        throw e
    }
  }
}

class DefaultMetricRegistryFactory extends MetricRegistryFactory {
  override def metricRegistry(config: Config): MetricRegistry = {
    val registry = new MetricRegistry
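    // Attach a JMX reporter so the registry's metrics are published as MBeans (visible in JMX tools such as jconsole).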
    val jmxReporter = JmxReporter.forRegistry(registry).build()
    jmxReporter.start()

    registry
  }
}

trait MetricRegistryFactory {
  def metricRegistry(config: Config): MetricRegistry
} 
Example 3
Source File: Service.scala    From haystack-traces   with Apache License 2.0
package com.expedia.www.haystack.trace.reader

import java.io.File

import com.codahale.metrics.JmxReporter
import com.expedia.www.haystack.commons.logger.LoggerUtils
import com.expedia.www.haystack.commons.metrics.MetricsSupport
import com.expedia.www.haystack.trace.reader.config.ProviderConfiguration
import com.expedia.www.haystack.trace.reader.services.{GrpcHealthService, TraceService}
import com.expedia.www.haystack.trace.reader.stores.EsIndexedTraceStore
import io.grpc.netty.NettyServerBuilder
import org.slf4j.{Logger, LoggerFactory}

object Service extends MetricsSupport {
  private val LOGGER: Logger = LoggerFactory.getLogger("TraceReader")

  // primary executor for service's async tasks
  implicit private val executor = scala.concurrent.ExecutionContext.global

  def main(args: Array[String]): Unit = {
    startJmxReporter()
    startService()
  }

  private def startJmxReporter(): Unit = {
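    // metricRegistry is provided by the MetricsSupport trait; publish its metrics over JMX.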
    JmxReporter
      .forRegistry(metricRegistry)
      .build()
      .start()
  }

  private def startService(): Unit = {
    try {
      val config = new ProviderConfiguration

      val store = new EsIndexedTraceStore(
        config.traceBackendConfiguration,
        config.elasticSearchConfiguration,
        config.whitelistedFieldsConfig)(executor)

      val serviceConfig = config.serviceConfig

      val serverBuilder = NettyServerBuilder
        .forPort(serviceConfig.port)
        .directExecutor()
        .addService(new TraceService(store, config.traceValidatorConfig, config.traceTransformerConfig)(executor))
        .addService(new GrpcHealthService())

      // enable SSL if configured
      if (serviceConfig.ssl.enabled) {
        serverBuilder.useTransportSecurity(new File(serviceConfig.ssl.certChainFilePath), new File(serviceConfig.ssl.privateKeyPath))
      }

      // The default max message size in gRPC is 4 MB. If our max message size is greater than 4 MB,
      // configure this limit on the Netty-based gRPC server.
      if (serviceConfig.maxSizeInBytes > 4 * 1024 * 1024) serverBuilder.maxMessageSize(serviceConfig.maxSizeInBytes)

      val server = serverBuilder.build().start()

      LOGGER.info(s"server started, listening on ${serviceConfig.port}")

      Runtime.getRuntime.addShutdownHook(new Thread() {
        override def run(): Unit = {
          LOGGER.info("shutting down gRPC server since JVM is shutting down")
          server.shutdown()
          store.close()
          LOGGER.info("server has been shutdown now")
        }
      })

      server.awaitTermination()
    } catch {
      case ex: Throwable =>
        ex.printStackTrace()
        LOGGER.error("Fatal error observed while running the app", ex)
        LoggerUtils.shutdownLogger()
        System.exit(1)
    }
  }
} 
Example 4
Source File: Service.scala    From haystack-traces   with Apache License 2.0
package com.expedia.www.haystack.trace.storage.backends.memory

import java.io.File

import com.codahale.metrics.JmxReporter
import com.expedia.www.haystack.commons.logger.LoggerUtils
import com.expedia.www.haystack.commons.metrics.MetricsSupport
import com.expedia.www.haystack.trace.storage.backends.memory.config.ProjectConfiguration
import com.expedia.www.haystack.trace.storage.backends.memory.services.{GrpcHealthService, SpansPersistenceService}
import com.expedia.www.haystack.trace.storage.backends.memory.store.InMemoryTraceRecordStore
import io.grpc.netty.NettyServerBuilder
import org.slf4j.{Logger, LoggerFactory}

object Service extends MetricsSupport {
  private val LOGGER: Logger = LoggerFactory.getLogger("MemoryBackend")

  // primary executor for service's async tasks
  implicit private val executor = scala.concurrent.ExecutionContext.global

  def main(args: Array[String]): Unit = {
    startJmxReporter()
    startService(args)
  }

  private def startJmxReporter(): Unit = {
    JmxReporter
      .forRegistry(metricRegistry)
      .build()
      .start()
  }

  private def startService(args: Array[String]): Unit = {
    try {
      val config = new ProjectConfiguration
      val serviceConfig = config.serviceConfig
      var port = serviceConfig.port
      if (args != null && args.nonEmpty) {
        port = args(0).toInt
      }

      val tracerRecordStore = new InMemoryTraceRecordStore()

      val serverBuilder = NettyServerBuilder
        .forPort(port)
        .directExecutor()
        .addService(new GrpcHealthService())
        .addService(new SpansPersistenceService(store = tracerRecordStore)(executor))


      // enable SSL if configured
      if (serviceConfig.ssl.enabled) {
        serverBuilder.useTransportSecurity(new File(serviceConfig.ssl.certChainFilePath), new File(serviceConfig.ssl.privateKeyPath))
      }


      val server = serverBuilder.build().start()

      LOGGER.info(s"server started, listening on ${serviceConfig.port}")

      Runtime.getRuntime.addShutdownHook(new Thread() {
        override def run(): Unit = {
          LOGGER.info("shutting down gRPC server since JVM is shutting down")
          server.shutdown()
          LOGGER.info("server has been shutdown now")
        }
      })

      server.awaitTermination()
    } catch {
      case ex: Throwable =>
        ex.printStackTrace()
        LOGGER.error("Fatal error observed while running the app", ex)
        LoggerUtils.shutdownLogger()
        System.exit(1)
    }
  }
} 
Example 5
Source File: Service.scala    From haystack-traces   with Apache License 2.0
package com.expedia.www.haystack.trace.storage.backends.cassandra

import java.io.File

import com.codahale.metrics.JmxReporter
import com.expedia.www.haystack.commons.logger.LoggerUtils
import com.expedia.www.haystack.commons.metrics.MetricsSupport
import com.expedia.www.haystack.trace.storage.backends.cassandra.client.{CassandraClusterFactory, CassandraSession}
import com.expedia.www.haystack.trace.storage.backends.cassandra.config.ProjectConfiguration
import com.expedia.www.haystack.trace.storage.backends.cassandra.services.{GrpcHealthService, SpansPersistenceService}
import com.expedia.www.haystack.trace.storage.backends.cassandra.store.{CassandraTraceRecordReader, CassandraTraceRecordWriter}
import io.grpc.netty.NettyServerBuilder
import org.slf4j.{Logger, LoggerFactory}

object Service extends MetricsSupport {
  private val LOGGER: Logger = LoggerFactory.getLogger("CassandraBackend")

  // primary executor for service's async tasks
  implicit private val executor = scala.concurrent.ExecutionContext.global

  def main(args: Array[String]): Unit = {
    startJmxReporter()
    startService()
  }

  private def startJmxReporter(): Unit = {
    JmxReporter
      .forRegistry(metricRegistry)
      .build()
      .start()
  }

  private def startService(): Unit = {
    try {
      val config = new ProjectConfiguration
      val serviceConfig = config.serviceConfig
      val cassandraSession = new CassandraSession(config.cassandraConfig.clientConfig, new CassandraClusterFactory)

      val tracerRecordWriter = new CassandraTraceRecordWriter(cassandraSession, config.cassandraConfig)
      val tracerRecordReader = new CassandraTraceRecordReader(cassandraSession, config.cassandraConfig.clientConfig)

      val serverBuilder = NettyServerBuilder
        .forPort(serviceConfig.port)
        .directExecutor()
        .addService(new GrpcHealthService())
        .addService(new SpansPersistenceService(reader = tracerRecordReader, writer = tracerRecordWriter)(executor))

      // enable SSL if configured
      if (serviceConfig.ssl.enabled) {
        serverBuilder.useTransportSecurity(new File(serviceConfig.ssl.certChainFilePath), new File(serviceConfig.ssl.privateKeyPath))
      }

      // The default max message size in gRPC is 4 MB. If our max message size is greater than 4 MB,
      // configure this limit on the Netty-based gRPC server.
      if (serviceConfig.maxSizeInBytes > 4 * 1024 * 1024) serverBuilder.maxMessageSize(serviceConfig.maxSizeInBytes)

      val server = serverBuilder.build().start()

      LOGGER.info(s"server started, listening on ${serviceConfig.port}")

      Runtime.getRuntime.addShutdownHook(new Thread() {
        override def run(): Unit = {
          LOGGER.info("shutting down gRPC server since JVM is shutting down")
          cassandraSession.close()
          server.shutdown()
          LOGGER.info("server has been shutdown now")
        }
      })

      server.awaitTermination()
    } catch {
      case ex: Throwable =>
        ex.printStackTrace()
        LOGGER.error("Fatal error observed while running the app", ex)
        LoggerUtils.shutdownLogger()
        System.exit(1)
    }
  }
} 
Example 6
Source File: App.scala    From haystack-traces   with Apache License 2.0
package com.expedia.www.haystack.trace.indexer

import com.codahale.metrics.JmxReporter
import com.expedia.www.haystack.commons.health.{HealthController, UpdateHealthStatusFile}
import com.expedia.www.haystack.commons.logger.LoggerUtils
import com.expedia.www.haystack.commons.metrics.MetricsSupport
import com.expedia.www.haystack.trace.indexer.config.ProjectConfiguration
import org.slf4j.LoggerFactory

object App extends MetricsSupport {
  private val LOGGER = LoggerFactory.getLogger(App.getClass)

  private var stream: StreamRunner = _
  private var appConfig: ProjectConfiguration = _

  def main(args: Array[String]): Unit = {
    startJmxReporter()

    try {
      appConfig = new ProjectConfiguration

      HealthController.addListener(new UpdateHealthStatusFile(appConfig.healthStatusFilePath))

      stream = new StreamRunner(
        appConfig.kafkaConfig,
        appConfig.spanAccumulateConfig,
        appConfig.elasticSearchConfig,
        appConfig.backendConfig,
        appConfig.serviceMetadataWriteConfig,
        appConfig.indexConfig)

      Runtime.getRuntime.addShutdownHook(new Thread {
        override def run(): Unit = {
          LOGGER.info("Shutdown hook is invoked, tearing down the application.")
          shutdown()
        }
      })

      stream.start()

      // mark the status of the app as 'healthy'
      HealthController.setHealthy()
    } catch {
      case ex: Exception =>
        LOGGER.error("Observed fatal exception while running the app", ex)
        shutdown()
        System.exit(1)
    }
  }

  private def shutdown(): Unit = {
    if (stream != null) stream.close()
    if (appConfig != null) appConfig.close()
    LoggerUtils.shutdownLogger()
  }

  private def startJmxReporter() = {
    val jmxReporter = JmxReporter.forRegistry(metricRegistry).build()
    jmxReporter.start()
  }
}