com.codahale.metrics.jvm.MemoryUsageGaugeSet Scala Examples
The following examples show how to use com.codahale.metrics.jvm.MemoryUsageGaugeSet, a Dropwizard Metrics MetricSet that exposes the heap, non-heap, and per-pool memory usage of the running JVM as gauges.
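All five examples share the same basic pattern: create a com.codahale.metrics.MetricRegistry, register the gauge set with it, and attach a reporter. Here is a minimal, self-contained sketch of that pattern (not taken from any of the projects below; the registry name and the 10-second reporting period are illustrative):

import java.util.concurrent.TimeUnit

import com.codahale.metrics.{ConsoleReporter, MetricRegistry}
import com.codahale.metrics.jvm.MemoryUsageGaugeSet

object MemoryMetricsDemo extends App {
  val registry = new MetricRegistry

  // Registering the set under "jvm.memory" prefixes every gauge it contains,
  // e.g. "jvm.memory.heap.used" and "jvm.memory.total.max"
  registry.register("jvm.memory", new MemoryUsageGaugeSet())

  // Dump all registered metrics to stdout every 10 seconds
  val reporter = ConsoleReporter.forRegistry(registry)
    .convertRatesTo(TimeUnit.SECONDS)
    .convertDurationsTo(TimeUnit.MILLISECONDS)
    .build()
  reporter.start(10, TimeUnit.SECONDS)

  Thread.sleep(60000) // keep the JVM alive long enough to see a few reports
}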
Example 1
Source File: JvmMetricsSet.scala From incubator-retired-gearpump with Apache License 2.0
package org.apache.gearpump.metrics

import java.util

import scala.collection.JavaConverters._

import com.codahale.metrics.jvm.{MemoryUsageGaugeSet, ThreadStatesGaugeSet}
import com.codahale.metrics.{Metric, MetricSet}

class JvmMetricsSet(name: String) extends MetricSet {

  override def getMetrics: util.Map[String, Metric] = {
    val memoryMetrics = new MemoryUsageGaugeSet().getMetrics.asScala
    val threadMetrics = new ThreadStatesGaugeSet().getMetrics.asScala
    Map(
      s"$name:memory.total.used" -> memoryMetrics("total.used"),
      s"$name:memory.total.committed" -> memoryMetrics("total.committed"),
      s"$name:memory.total.max" -> memoryMetrics("total.max"),
      s"$name:memory.heap.used" -> memoryMetrics("heap.used"),
      s"$name:memory.heap.committed" -> memoryMetrics("heap.committed"),
      s"$name:memory.heap.max" -> memoryMetrics("heap.max"),
      s"$name:thread.count" -> threadMetrics("count"),
      s"$name:thread.daemon.count" -> threadMetrics("daemon.count")
    ).asJava
  }
}
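JvmMetricsSet flattens the memory and thread gauge sets into a single namespace keyed by a caller-supplied name. A hypothetical caller (this registration snippet is not part of the Gearpump source above) would register it like any other MetricSet:

import com.codahale.metrics.MetricRegistry
import org.apache.gearpump.metrics.JvmMetricsSet

val registry = new MetricRegistry
// "worker-0" is a made-up instance name used for illustration
registry.registerAll(new JvmMetricsSet("worker-0"))
// The gauges are now visible under names such as "worker-0:memory.heap.used"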
Example 2
Source File: RemoraApp.scala From remora with MIT License
import java.io.IOException
import java.net.ConnectException
import java.util.concurrent.{TimeUnit, TimeoutException}

import akka.actor.ActorSystem
import akka.stream.{ActorMaterializer, ActorMaterializerSettings, Supervision}
import com.amazonaws.services.cloudwatch.{AmazonCloudWatchAsync, AmazonCloudWatchAsyncClientBuilder}
import com.blacklocus.metrics.CloudWatchReporterBuilder
import com.codahale.metrics.jvm.{GarbageCollectorMetricSet, MemoryUsageGaugeSet, ThreadStatesGaugeSet}
import com.typesafe.scalalogging.LazyLogging
import config.{KafkaSettings, MetricsSettings}
import kafka.admin.RemoraKafkaConsumerGroupService
import reporter.RemoraDatadogReporter

import scala.concurrent.duration._
import scala.util.control.NonFatal

object RemoraApp extends App with nl.grons.metrics.scala.DefaultInstrumented with LazyLogging {

  private val actorSystemName: String = "remora"
  implicit val actorSystem = ActorSystem(actorSystemName)

  metricRegistry.registerAll(new GarbageCollectorMetricSet)
  metricRegistry.registerAll(new MemoryUsageGaugeSet)
  metricRegistry.registerAll(new ThreadStatesGaugeSet)

  lazy val decider: Supervision.Decider = {
    case _: IOException | _: ConnectException | _: TimeoutException => Supervision.Restart
    case NonFatal(err: Throwable) =>
      actorSystem.log.error(err, "Unhandled Exception in Stream: {}", err.getMessage)
      Supervision.Stop
  }

  implicit val materializer = ActorMaterializer(
    ActorMaterializerSettings(actorSystem).withSupervisionStrategy(decider))(actorSystem)

  implicit val executionContext = actorSystem.dispatchers.lookup("kafka-consumer-dispatcher")
  val kafkaSettings = KafkaSettings(actorSystem.settings.config)
  val consumer = new RemoraKafkaConsumerGroupService(kafkaSettings)
  val kafkaClientActor = actorSystem.actorOf(KafkaClientActor.props(consumer), name = "kafka-client-actor")

  Api(kafkaClientActor).start()

  val metricsSettings = MetricsSettings(actorSystem.settings.config)

  if (metricsSettings.registryOptions.enabled) {
    val exportConsumerMetricsToRegistryActor =
      actorSystem.actorOf(ExportConsumerMetricsToRegistryActor.props(kafkaClientActor),
        name = "export-consumer-metrics-actor")
    actorSystem.scheduler.schedule(0 second, metricsSettings.registryOptions.intervalSeconds second,
      exportConsumerMetricsToRegistryActor, "export")
  }

  if (metricsSettings.cloudWatch.enabled) {
    logger.info("Reporting metricsRegistry to Cloudwatch")
    val amazonCloudWatchAsync: AmazonCloudWatchAsync = AmazonCloudWatchAsyncClientBuilder.defaultClient

    new CloudWatchReporterBuilder()
      .withNamespace(metricsSettings.cloudWatch.name)
      .withRegistry(metricRegistry)
      .withClient(amazonCloudWatchAsync)
      .build()
      .start(metricsSettings.cloudWatch.intervalMinutes, TimeUnit.MINUTES)
  }

  if (metricsSettings.dataDog.enabled) {
    logger.info(s"Reporting metricsRegistry to Datadog at ${metricsSettings.dataDog.agentHost}:${metricsSettings.dataDog.agentPort}")
    val datadogReporter = new RemoraDatadogReporter(metricRegistry, metricsSettings.dataDog)
    datadogReporter.startReporter()
  }
}
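Note that registerAll is used here without a name prefix, so the JVM gauges land at the top level of the registry ("heap.used" rather than "jvm.memory.heap.used" as in the other examples). A standalone sketch for inspecting exactly which names get registered (the println loop is illustrative):

import scala.collection.JavaConverters._

import com.codahale.metrics.MetricRegistry
import com.codahale.metrics.jvm.{GarbageCollectorMetricSet, MemoryUsageGaugeSet, ThreadStatesGaugeSet}

val registry = new MetricRegistry
registry.registerAll(new GarbageCollectorMetricSet)
registry.registerAll(new MemoryUsageGaugeSet)
registry.registerAll(new ThreadStatesGaugeSet)

// Prints unprefixed names such as "heap.used", "total.committed" and "count"
registry.getNames.asScala.foreach(println)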
Example 3
Source File: Registry.scala From shield with MIT License
package shield.metrics

import java.util.concurrent.TimeUnit

import com.codahale.metrics.json.MetricsModule
import com.codahale.metrics.jvm.{ThreadStatesGaugeSet, MemoryUsageGaugeSet, GarbageCollectorMetricSet}
import com.codahale.metrics.{JvmAttributeGaugeSet, MetricRegistry}
import com.fasterxml.jackson.databind.ObjectMapper
import spray.http.{ContentTypes, HttpEntity, StatusCodes, HttpResponse}

// todo: Separate metrics per domain
object Registry {
  val metricRegistry = new MetricRegistry()
  metricRegistry.register("jvm.attribute", new JvmAttributeGaugeSet())
  metricRegistry.register("jvm.gc", new GarbageCollectorMetricSet())
  metricRegistry.register("jvm.memory", new MemoryUsageGaugeSet())
  metricRegistry.register("jvm.threads", new ThreadStatesGaugeSet())

  private val mapper = new ObjectMapper()
  mapper.registerModule(new MetricsModule(TimeUnit.SECONDS, TimeUnit.MILLISECONDS, false))
  private val writer = mapper.writerWithDefaultPrettyPrinter()

  def metricsResponse: HttpResponse = HttpResponse(StatusCodes.OK, HttpEntity(
    ContentTypes.`application/json`,
    writer.writeValueAsBytes(metricRegistry)
  ))
}
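Since metricsResponse is a plain spray HttpResponse, it can be completed directly from a route. A hypothetical endpoint (the path and port below are illustrative and not part of the Shield source):

import akka.actor.ActorSystem
import spray.routing.SimpleRoutingApp

import shield.metrics.Registry

object MetricsServer extends App with SimpleRoutingApp {
  implicit val system = ActorSystem("metrics-server")

  startServer(interface = "localhost", port = 8080) {
    path("metrics") {
      get {
        // Serves the pretty-printed JSON view of the registry
        complete(Registry.metricsResponse)
      }
    }
  }
}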
Example 4
Source File: MetricsExtension.scala From service-container with Apache License 2.0
package com.github.vonnagy.service.container.metrics

import java.lang.management.ManagementFactory

import akka.actor._
import com.codahale.metrics.MetricRegistry
import com.codahale.metrics.jvm.{BufferPoolMetricSet, GarbageCollectorMetricSet, MemoryUsageGaugeSet, ThreadStatesGaugeSet}

class MetricsExtension(extendedSystem: ExtendedActorSystem) extends Extension {

  // Allow access to the extended system
  val system = extendedSystem

  // The application wide metrics registry.
  val metricRegistry = new MetricRegistry()

  // Register the Jvm metrics
  val srv = ManagementFactory.getPlatformMBeanServer
  metricRegistry.register("jvm.buffer-pool", new BufferPoolMetricSet(srv))
  metricRegistry.register("jvm.gc", new GarbageCollectorMetricSet)
  metricRegistry.register("jvm.memory", new MemoryUsageGaugeSet)
  metricRegistry.register("jvm.thread", new ThreadStatesGaugeSet)
}

object Metrics extends ExtensionId[MetricsExtension] with ExtensionIdProvider {

  // The lookup method is required by ExtensionIdProvider,
  // so we return ourselves here, this allows us
  // to configure our extension to be loaded when
  // the ActorSystem starts up
  override def lookup = Metrics

  // This method will be called by Akka
  // to instantiate our Extension
  override def createExtension(system: ExtendedActorSystem) = new MetricsExtension(system)

  def apply()(implicit system: ActorSystem): MetricsExtension = system.registerExtension(this)
}
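Once the extension is in place, any code with an ActorSystem in scope can reach the shared registry through the companion object. A small usage sketch (the system name and counter name are made up):

import akka.actor.ActorSystem
import com.github.vonnagy.service.container.metrics.Metrics

implicit val system = ActorSystem("example")

// Looks up (registering on first use) the extension and its shared registry
val registry = Metrics().metricRegistry
registry.counter("example.requests").inc()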
Example 5
Source File: Init.scala From cave with MIT License
package init

import java.util.concurrent.TimeUnit

import com.cave.metrics.data.influxdb.{InfluxClientFactory, InfluxConfiguration}
import com.cave.metrics.data.metrics.InternalReporter
import com.cave.metrics.data.{AlertManager, AwsConfig, Metric, PasswordHelper}
import com.codahale.metrics.MetricRegistry
import com.codahale.metrics.jvm.{GarbageCollectorMetricSet, MemoryUsageGaugeSet, ThreadStatesGaugeSet}
import com.typesafe.config.ConfigFactory
import org.apache.commons.logging.LogFactory
import play.api.Play

object Init {

  val metricRegistry = new MetricRegistry
  private val log = LogFactory.getLog("Init")
  private val InternalTags = Map(Metric.Organization -> Metric.Internal)

  private[this] val configuration = Play.current.configuration

  val baseUrl = configuration.getString("baseUrl").getOrElse("https://api.cavellc.io")
  val maxTokens = configuration.getInt("maxTokens").getOrElse(3)

  val serviceConfFile = configuration.getString("serviceConf").getOrElse("api-service.conf")
  val appConfig = ConfigFactory.load(serviceConfFile).getConfig("api-service")

  // prepare AWS config and Kinesis data sink
  val awsConfig = new AwsConfig(appConfig)

  // a wrapper for required AWS
  val awsWrapper = new AwsWrapper(awsConfig)

  // a connection to the InfluxDB backend
  val influxConfig = appConfig.getConfig("influx")
  val influxClientFactory = new InfluxClientFactory(InfluxConfiguration(influxConfig))

  val alertManager = new AlertManager(awsWrapper.dataManager, influxClientFactory)

  val mailService = new MailService
  val passwordHelper = new PasswordHelper

  def init() {
    awsWrapper.init()
    log.warn("Init.init()")

    val reporter = InternalReporter(registry = metricRegistry) { metrics =>
      metrics foreach (metric => awsWrapper.dataSink.sendMetric(
        Metric(metric.name, metric.timestamp, metric.value, InternalTags ++ metric.tags)))
    }

    reporter.start(1, TimeUnit.MINUTES)

    metricRegistry.register(MetricRegistry.name("jvm", "gc"), new GarbageCollectorMetricSet())
    metricRegistry.register(MetricRegistry.name("jvm", "memory"), new MemoryUsageGaugeSet())
    metricRegistry.register(MetricRegistry.name("jvm", "thread-states"), new ThreadStatesGaugeSet())
  }

  def shutdown() {
    awsWrapper.shutdown()
    influxClientFactory.close()
    log.warn("Init.shutdown()")
  }
}
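If the same gauges should also be visible in a JMX console such as JConsole, Dropwizard's JmxReporter (still in the com.codahale.metrics package in the 3.x line these examples use) can be attached to the registry. This addition is a sketch and not part of the CAVE source above:

import com.codahale.metrics.{JmxReporter, MetricRegistry}
import com.codahale.metrics.jvm.MemoryUsageGaugeSet

val registry = new MetricRegistry
registry.register(MetricRegistry.name("jvm", "memory"), new MemoryUsageGaugeSet())

// Exposes every metric in the registry as an MBean (default domain: "metrics")
val jmxReporter = JmxReporter.forRegistry(registry).build()
jmxReporter.start()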