com.codahale.metrics.Gauge Scala Examples
The following examples show how to use com.codahale.metrics.Gauge.
Follow the links above each example to view the original project or source file.
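Before diving in, here is the core pattern every example below shares: a Gauge is a read-only metric whose getValue is recomputed each time the registry is polled, so you register an anonymous subclass that closes over whatever state you want to observe. The minimal sketch below is illustrative only; the registry and queue names are not taken from any project on this page.

import java.util.concurrent.ConcurrentLinkedQueue

import com.codahale.metrics.{Gauge, MetricRegistry}

object GaugeSketch {
  def main(args: Array[String]): Unit = {
    val registry = new MetricRegistry()
    val queue = new ConcurrentLinkedQueue[String]()

    // The gauge stores no value of its own; it re-reads queue.size on every poll.
    registry.register(MetricRegistry.name("queue", "size"), new Gauge[Int] {
      override def getValue: Int = queue.size
    })

    queue.add("job-1")
    // MetricRegistry.name("queue", "size") yields the dotted key "queue.size".
    println(registry.getGauges.get("queue.size").getValue) // prints 1
  }
}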
Example 1
Source File: MesosClusterSchedulerSource.scala From spark1.52 with Apache License 2.0
package org.apache.spark.scheduler.cluster.mesos

import com.codahale.metrics.{Gauge, MetricRegistry}

import org.apache.spark.metrics.source.Source

private[mesos] class MesosClusterSchedulerSource(scheduler: MesosClusterScheduler)
  extends Source {

  override def sourceName: String = "mesos_cluster"
  override def metricRegistry: MetricRegistry = new MetricRegistry()

  metricRegistry.register(MetricRegistry.name("waitingDrivers"), new Gauge[Int] {
    override def getValue: Int = scheduler.getQueuedDriversSize
  })

  metricRegistry.register(MetricRegistry.name("launchedDrivers"), new Gauge[Int] {
    override def getValue: Int = scheduler.getLaunchedDriversSize
  })

  metricRegistry.register(MetricRegistry.name("retryDrivers"), new Gauge[Int] {
    override def getValue: Int = scheduler.getPendingRetryDriversSize
  })
}
Example 2
Source File: DAGSchedulerSource.scala From multi-tenancy-spark with Apache License 2.0
package org.apache.spark.scheduler

import com.codahale.metrics.{Gauge, MetricRegistry, Timer}

import org.apache.spark.metrics.source.Source

private[scheduler] class DAGSchedulerSource(val dagScheduler: DAGScheduler)
  extends Source {

  override val metricRegistry = new MetricRegistry()
  override val sourceName = "DAGScheduler"

  metricRegistry.register(MetricRegistry.name("stage", "failedStages"), new Gauge[Int] {
    override def getValue: Int = dagScheduler.failedStages.size
  })

  metricRegistry.register(MetricRegistry.name("stage", "runningStages"), new Gauge[Int] {
    override def getValue: Int = dagScheduler.runningStages.size
  })

  metricRegistry.register(MetricRegistry.name("stage", "waitingStages"), new Gauge[Int] {
    override def getValue: Int = dagScheduler.waitingStages.size
  })

  metricRegistry.register(MetricRegistry.name("job", "allJobs"), new Gauge[Int] {
    override def getValue: Int = dagScheduler.numTotalJobs
  })

  metricRegistry.register(MetricRegistry.name("job", "activeJobs"), new Gauge[Int] {
    override def getValue: Int = dagScheduler.activeJobs.size
  })

  val messageProcessingTimer: Timer =
    metricRegistry.timer(MetricRegistry.name("messageProcessingTime"))
}
Example 3
Source File: BlockManagerSource.scala From multi-tenancy-spark with Apache License 2.0
package org.apache.spark.storage

import com.codahale.metrics.{Gauge, MetricRegistry}

import org.apache.spark.metrics.source.Source

private[spark] class BlockManagerSource(val blockManager: BlockManager)
  extends Source {

  override val metricRegistry = new MetricRegistry()
  override val sourceName = "BlockManager"

  metricRegistry.register(MetricRegistry.name("memory", "maxMem_MB"), new Gauge[Long] {
    override def getValue: Long = {
      val storageStatusList = blockManager.master.getStorageStatus
      val maxMem = storageStatusList.map(_.maxMem).sum
      maxMem / 1024 / 1024
    }
  })

  metricRegistry.register(MetricRegistry.name("memory", "remainingMem_MB"), new Gauge[Long] {
    override def getValue: Long = {
      val storageStatusList = blockManager.master.getStorageStatus
      val remainingMem = storageStatusList.map(_.memRemaining).sum
      remainingMem / 1024 / 1024
    }
  })

  metricRegistry.register(MetricRegistry.name("memory", "memUsed_MB"), new Gauge[Long] {
    override def getValue: Long = {
      val storageStatusList = blockManager.master.getStorageStatus
      val memUsed = storageStatusList.map(_.memUsed).sum
      memUsed / 1024 / 1024
    }
  })

  metricRegistry.register(MetricRegistry.name("disk", "diskSpaceUsed_MB"), new Gauge[Long] {
    override def getValue: Long = {
      val storageStatusList = blockManager.master.getStorageStatus
      val diskSpaceUsed = storageStatusList.map(_.diskUsed).sum
      diskSpaceUsed / 1024 / 1024
    }
  })
}
Example 4
Source File: ExecutorSource.scala From multi-tenancy-spark with Apache License 2.0
package org.apache.spark.executor

import java.util.concurrent.ThreadPoolExecutor

import scala.collection.JavaConverters._

import com.codahale.metrics.{Gauge, MetricRegistry}
import org.apache.hadoop.fs.FileSystem

import org.apache.spark.metrics.source.Source

private[spark] class ExecutorSource(threadPool: ThreadPoolExecutor, executorId: String)
  extends Source {

  private def fileStats(scheme: String): Option[FileSystem.Statistics] =
    FileSystem.getAllStatistics.asScala.find(s => s.getScheme.equals(scheme))

  private def registerFileSystemStat[T](
      scheme: String, name: String, f: FileSystem.Statistics => T, defaultValue: T) = {
    metricRegistry.register(MetricRegistry.name("filesystem", scheme, name), new Gauge[T] {
      override def getValue: T = fileStats(scheme).map(f).getOrElse(defaultValue)
    })
  }

  override val metricRegistry = new MetricRegistry()
  override val sourceName = "executor"

  // Gauge for executor thread pool's actively executing task counts
  metricRegistry.register(MetricRegistry.name("threadpool", "activeTasks"), new Gauge[Int] {
    override def getValue: Int = threadPool.getActiveCount()
  })

  // Gauge for executor thread pool's approximate total number of tasks that have been completed
  metricRegistry.register(MetricRegistry.name("threadpool", "completeTasks"), new Gauge[Long] {
    override def getValue: Long = threadPool.getCompletedTaskCount()
  })

  // Gauge for executor thread pool's current number of threads
  metricRegistry.register(MetricRegistry.name("threadpool", "currentPool_size"), new Gauge[Int] {
    override def getValue: Int = threadPool.getPoolSize()
  })

  // Gauge for executor thread pool's maximum allowed number of threads
  metricRegistry.register(MetricRegistry.name("threadpool", "maxPool_size"), new Gauge[Int] {
    override def getValue: Int = threadPool.getMaximumPoolSize()
  })

  // Gauges for file system stats of this executor
  for (scheme <- Array("hdfs", "file")) {
    registerFileSystemStat(scheme, "read_bytes", _.getBytesRead(), 0L)
    registerFileSystemStat(scheme, "write_bytes", _.getBytesWritten(), 0L)
    registerFileSystemStat(scheme, "read_ops", _.getReadOps(), 0)
    registerFileSystemStat(scheme, "largeRead_ops", _.getLargeReadOps(), 0)
    registerFileSystemStat(scheme, "write_ops", _.getWriteOps(), 0)
  }
}
Example 5
Source File: MasterSource.scala From iolap with Apache License 2.0
package org.apache.spark.deploy.master

import com.codahale.metrics.{Gauge, MetricRegistry}

import org.apache.spark.metrics.source.Source

private[spark] class MasterSource(val master: Master) extends Source {
  override val metricRegistry = new MetricRegistry()
  override val sourceName = "master"

  // Gauge for worker numbers in cluster
  metricRegistry.register(MetricRegistry.name("workers"), new Gauge[Int] {
    override def getValue: Int = master.workers.size
  })

  // Gauge for alive worker numbers in cluster
  metricRegistry.register(MetricRegistry.name("aliveWorkers"), new Gauge[Int] {
    override def getValue: Int = master.workers.filter(_.state == WorkerState.ALIVE).size
  })

  // Gauge for application numbers in cluster
  metricRegistry.register(MetricRegistry.name("apps"), new Gauge[Int] {
    override def getValue: Int = master.apps.size
  })

  // Gauge for waiting application numbers in cluster
  metricRegistry.register(MetricRegistry.name("waitingApps"), new Gauge[Int] {
    override def getValue: Int = master.waitingApps.size
  })
}
Example 6
Source File: WorkerSource.scala From iolap with Apache License 2.0
package org.apache.spark.deploy.worker

import com.codahale.metrics.{Gauge, MetricRegistry}

import org.apache.spark.metrics.source.Source

private[worker] class WorkerSource(val worker: Worker) extends Source {
  override val sourceName = "worker"
  override val metricRegistry = new MetricRegistry()

  metricRegistry.register(MetricRegistry.name("executors"), new Gauge[Int] {
    override def getValue: Int = worker.executors.size
  })

  // Gauge for cores used of this worker
  metricRegistry.register(MetricRegistry.name("coresUsed"), new Gauge[Int] {
    override def getValue: Int = worker.coresUsed
  })

  // Gauge for memory used of this worker
  metricRegistry.register(MetricRegistry.name("memUsed_MB"), new Gauge[Int] {
    override def getValue: Int = worker.memoryUsed
  })

  // Gauge for cores free of this worker
  metricRegistry.register(MetricRegistry.name("coresFree"), new Gauge[Int] {
    override def getValue: Int = worker.coresFree
  })

  // Gauge for memory free of this worker
  metricRegistry.register(MetricRegistry.name("memFree_MB"), new Gauge[Int] {
    override def getValue: Int = worker.memoryFree
  })
}
Example 7
Source File: DAGSchedulerSource.scala From iolap with Apache License 2.0
package org.apache.spark.scheduler

import com.codahale.metrics.{Gauge, MetricRegistry}

import org.apache.spark.metrics.source.Source

private[spark] class DAGSchedulerSource(val dagScheduler: DAGScheduler)
  extends Source {

  override val metricRegistry = new MetricRegistry()
  override val sourceName = "DAGScheduler"

  metricRegistry.register(MetricRegistry.name("stage", "failedStages"), new Gauge[Int] {
    override def getValue: Int = dagScheduler.failedStages.size
  })

  metricRegistry.register(MetricRegistry.name("stage", "runningStages"), new Gauge[Int] {
    override def getValue: Int = dagScheduler.runningStages.size
  })

  metricRegistry.register(MetricRegistry.name("stage", "waitingStages"), new Gauge[Int] {
    override def getValue: Int = dagScheduler.waitingStages.size
  })

  metricRegistry.register(MetricRegistry.name("job", "allJobs"), new Gauge[Int] {
    override def getValue: Int = dagScheduler.numTotalJobs
  })

  metricRegistry.register(MetricRegistry.name("job", "activeJobs"), new Gauge[Int] {
    override def getValue: Int = dagScheduler.activeJobs.size
  })
}
Example 8
Source File: MesosClusterSchedulerSource.scala From iolap with Apache License 2.0
package org.apache.spark.scheduler.cluster.mesos

import com.codahale.metrics.{Gauge, MetricRegistry}

import org.apache.spark.metrics.source.Source

private[mesos] class MesosClusterSchedulerSource(scheduler: MesosClusterScheduler)
  extends Source {

  override def sourceName: String = "mesos_cluster"
  override def metricRegistry: MetricRegistry = new MetricRegistry()

  metricRegistry.register(MetricRegistry.name("waitingDrivers"), new Gauge[Int] {
    override def getValue: Int = scheduler.getQueuedDriversSize
  })

  metricRegistry.register(MetricRegistry.name("launchedDrivers"), new Gauge[Int] {
    override def getValue: Int = scheduler.getLaunchedDriversSize
  })

  metricRegistry.register(MetricRegistry.name("retryDrivers"), new Gauge[Int] {
    override def getValue: Int = scheduler.getPendingRetryDriversSize
  })
}
Example 9
Source File: BlockManagerSource.scala From iolap with Apache License 2.0
package org.apache.spark.storage

import com.codahale.metrics.{Gauge, MetricRegistry}

import org.apache.spark.metrics.source.Source

private[spark] class BlockManagerSource(val blockManager: BlockManager)
  extends Source {

  override val metricRegistry = new MetricRegistry()
  override val sourceName = "BlockManager"

  metricRegistry.register(MetricRegistry.name("memory", "maxMem_MB"), new Gauge[Long] {
    override def getValue: Long = {
      val storageStatusList = blockManager.master.getStorageStatus
      val maxMem = storageStatusList.map(_.maxMem).sum
      maxMem / 1024 / 1024
    }
  })

  metricRegistry.register(MetricRegistry.name("memory", "remainingMem_MB"), new Gauge[Long] {
    override def getValue: Long = {
      val storageStatusList = blockManager.master.getStorageStatus
      val remainingMem = storageStatusList.map(_.memRemaining).sum
      remainingMem / 1024 / 1024
    }
  })

  metricRegistry.register(MetricRegistry.name("memory", "memUsed_MB"), new Gauge[Long] {
    override def getValue: Long = {
      val storageStatusList = blockManager.master.getStorageStatus
      val memUsed = storageStatusList.map(_.memUsed).sum
      memUsed / 1024 / 1024
    }
  })

  metricRegistry.register(MetricRegistry.name("disk", "diskSpaceUsed_MB"), new Gauge[Long] {
    override def getValue: Long = {
      val storageStatusList = blockManager.master.getStorageStatus
      val diskSpaceUsed = storageStatusList.map(_.diskUsed).sum
      diskSpaceUsed / 1024 / 1024
    }
  })
}
Example 10
Source File: ExecutorSource.scala From iolap with Apache License 2.0
package org.apache.spark.executor

import java.util.concurrent.ThreadPoolExecutor

import scala.collection.JavaConversions._

import com.codahale.metrics.{Gauge, MetricRegistry}
import org.apache.hadoop.fs.FileSystem

import org.apache.spark.metrics.source.Source

private[spark] class ExecutorSource(threadPool: ThreadPoolExecutor, executorId: String)
  extends Source {

  private def fileStats(scheme: String): Option[FileSystem.Statistics] =
    FileSystem.getAllStatistics().find(s => s.getScheme.equals(scheme))

  private def registerFileSystemStat[T](
      scheme: String, name: String, f: FileSystem.Statistics => T, defaultValue: T) = {
    metricRegistry.register(MetricRegistry.name("filesystem", scheme, name), new Gauge[T] {
      override def getValue: T = fileStats(scheme).map(f).getOrElse(defaultValue)
    })
  }

  override val metricRegistry = new MetricRegistry()
  override val sourceName = "executor"

  // Gauge for executor thread pool's actively executing task counts
  metricRegistry.register(MetricRegistry.name("threadpool", "activeTasks"), new Gauge[Int] {
    override def getValue: Int = threadPool.getActiveCount()
  })

  // Gauge for executor thread pool's approximate total number of tasks that have been completed
  metricRegistry.register(MetricRegistry.name("threadpool", "completeTasks"), new Gauge[Long] {
    override def getValue: Long = threadPool.getCompletedTaskCount()
  })

  // Gauge for executor thread pool's current number of threads
  metricRegistry.register(MetricRegistry.name("threadpool", "currentPool_size"), new Gauge[Int] {
    override def getValue: Int = threadPool.getPoolSize()
  })

  // Gauge for executor thread pool's maximum allowed number of threads
  metricRegistry.register(MetricRegistry.name("threadpool", "maxPool_size"), new Gauge[Int] {
    override def getValue: Int = threadPool.getMaximumPoolSize()
  })

  // Gauges for file system stats of this executor
  for (scheme <- Array("hdfs", "file")) {
    registerFileSystemStat(scheme, "read_bytes", _.getBytesRead(), 0L)
    registerFileSystemStat(scheme, "write_bytes", _.getBytesWritten(), 0L)
    registerFileSystemStat(scheme, "read_ops", _.getReadOps(), 0)
    registerFileSystemStat(scheme, "largeRead_ops", _.getLargeReadOps(), 0)
    registerFileSystemStat(scheme, "write_ops", _.getWriteOps(), 0)
  }
}
Example 11
Source File: MasterSource.scala From spark1.52 with Apache License 2.0
package org.apache.spark.deploy.master

import com.codahale.metrics.{Gauge, MetricRegistry}

import org.apache.spark.metrics.source.Source

private[spark] class MasterSource(val master: Master) extends Source {
  override val metricRegistry = new MetricRegistry()
  override val sourceName = "master"

  // Gauge for worker numbers in cluster
  metricRegistry.register(MetricRegistry.name("workers"), new Gauge[Int] {
    override def getValue: Int = master.workers.size
  })

  // Gauge for alive worker numbers in cluster
  metricRegistry.register(MetricRegistry.name("aliveWorkers"), new Gauge[Int] {
    override def getValue: Int = master.workers.filter(_.state == WorkerState.ALIVE).size
  })

  // Gauge for application numbers in cluster
  metricRegistry.register(MetricRegistry.name("apps"), new Gauge[Int] {
    override def getValue: Int = master.apps.size
  })

  // Gauge for waiting application numbers in cluster
  metricRegistry.register(MetricRegistry.name("waitingApps"), new Gauge[Int] {
    override def getValue: Int = master.waitingApps.size
  })
}
Example 12
Source File: WorkerSource.scala From spark1.52 with Apache License 2.0
package org.apache.spark.deploy.worker

import com.codahale.metrics.{Gauge, MetricRegistry}

import org.apache.spark.metrics.source.Source

private[worker] class WorkerSource(val worker: Worker) extends Source {
  override val sourceName = "worker"
  override val metricRegistry = new MetricRegistry()

  metricRegistry.register(MetricRegistry.name("executors"), new Gauge[Int] {
    override def getValue: Int = worker.executors.size
  })

  // Gauge for cores used of this worker
  metricRegistry.register(MetricRegistry.name("coresUsed"), new Gauge[Int] {
    override def getValue: Int = worker.coresUsed
  })

  // Gauge for memory used of this worker
  metricRegistry.register(MetricRegistry.name("memUsed_MB"), new Gauge[Int] {
    override def getValue: Int = worker.memoryUsed
  })

  // Gauge for cores free of this worker
  metricRegistry.register(MetricRegistry.name("coresFree"), new Gauge[Int] {
    override def getValue: Int = worker.coresFree
  })

  // Gauge for memory free of this worker
  metricRegistry.register(MetricRegistry.name("memFree_MB"), new Gauge[Int] {
    override def getValue: Int = worker.memoryFree
  })
}
Example 13
Source File: DAGSchedulerSource.scala From spark1.52 with Apache License 2.0
package org.apache.spark.scheduler

import com.codahale.metrics.{Gauge, MetricRegistry, Timer}

import org.apache.spark.metrics.source.Source

private[scheduler] class DAGSchedulerSource(val dagScheduler: DAGScheduler)
  extends Source {

  override val metricRegistry = new MetricRegistry()
  override val sourceName = "DAGScheduler"

  metricRegistry.register(MetricRegistry.name("stage", "failedStages"), new Gauge[Int] {
    override def getValue: Int = dagScheduler.failedStages.size
  })

  metricRegistry.register(MetricRegistry.name("stage", "runningStages"), new Gauge[Int] {
    override def getValue: Int = dagScheduler.runningStages.size
  })

  metricRegistry.register(MetricRegistry.name("stage", "waitingStages"), new Gauge[Int] {
    override def getValue: Int = dagScheduler.waitingStages.size
  })

  metricRegistry.register(MetricRegistry.name("job", "allJobs"), new Gauge[Int] {
    override def getValue: Int = dagScheduler.numTotalJobs
  })

  metricRegistry.register(MetricRegistry.name("job", "activeJobs"), new Gauge[Int] {
    override def getValue: Int = dagScheduler.activeJobs.size
  })

  val messageProcessingTimer: Timer =
    metricRegistry.timer(MetricRegistry.name("messageProcessingTime"))
}
Example 14
Source File: WorkerSource.scala From multi-tenancy-spark with Apache License 2.0
package org.apache.spark.deploy.worker

import com.codahale.metrics.{Gauge, MetricRegistry}

import org.apache.spark.metrics.source.Source

private[worker] class WorkerSource(val worker: Worker) extends Source {
  override val sourceName = "worker"
  override val metricRegistry = new MetricRegistry()

  metricRegistry.register(MetricRegistry.name("executors"), new Gauge[Int] {
    override def getValue: Int = worker.executors.size
  })

  // Gauge for cores used of this worker
  metricRegistry.register(MetricRegistry.name("coresUsed"), new Gauge[Int] {
    override def getValue: Int = worker.coresUsed
  })

  // Gauge for memory used of this worker
  metricRegistry.register(MetricRegistry.name("memUsed_MB"), new Gauge[Int] {
    override def getValue: Int = worker.memoryUsed
  })

  // Gauge for cores free of this worker
  metricRegistry.register(MetricRegistry.name("coresFree"), new Gauge[Int] {
    override def getValue: Int = worker.coresFree
  })

  // Gauge for memory free of this worker
  metricRegistry.register(MetricRegistry.name("memFree_MB"), new Gauge[Int] {
    override def getValue: Int = worker.memoryFree
  })
}
Example 15
Source File: BlockManagerSource.scala From spark1.52 with Apache License 2.0
package org.apache.spark.storage

import com.codahale.metrics.{Gauge, MetricRegistry}

import org.apache.spark.metrics.source.Source

private[spark] class BlockManagerSource(val blockManager: BlockManager)
  extends Source {

  override val metricRegistry = new MetricRegistry()
  override val sourceName = "BlockManager"

  metricRegistry.register(MetricRegistry.name("memory", "maxMem_MB"), new Gauge[Long] {
    override def getValue: Long = {
      val storageStatusList = blockManager.master.getStorageStatus
      val maxMem = storageStatusList.map(_.maxMem).sum
      maxMem / 1024 / 1024
    }
  })

  metricRegistry.register(MetricRegistry.name("memory", "remainingMem_MB"), new Gauge[Long] {
    override def getValue: Long = {
      val storageStatusList = blockManager.master.getStorageStatus
      val remainingMem = storageStatusList.map(_.memRemaining).sum
      remainingMem / 1024 / 1024
    }
  })

  metricRegistry.register(MetricRegistry.name("memory", "memUsed_MB"), new Gauge[Long] {
    override def getValue: Long = {
      val storageStatusList = blockManager.master.getStorageStatus
      val memUsed = storageStatusList.map(_.memUsed).sum
      memUsed / 1024 / 1024
    }
  })

  metricRegistry.register(MetricRegistry.name("disk", "diskSpaceUsed_MB"), new Gauge[Long] {
    override def getValue: Long = {
      val storageStatusList = blockManager.master.getStorageStatus
      val diskSpaceUsed = storageStatusList.map(_.diskUsed).sum
      diskSpaceUsed / 1024 / 1024
    }
  })
}
Example 16
Source File: MetricsReporter.scala From Spark-2.3.1 with Apache License 2.0
package org.apache.spark.sql.execution.streaming

import com.codahale.metrics.{Gauge, MetricRegistry}

import org.apache.spark.internal.Logging
import org.apache.spark.metrics.source.{Source => CodahaleSource}
import org.apache.spark.sql.streaming.StreamingQueryProgress

class MetricsReporter(
    stream: StreamExecution,
    override val sourceName: String) extends CodahaleSource with Logging {

  override val metricRegistry: MetricRegistry = new MetricRegistry

  // Metric names should not have . in them, so that all the metrics of a query are identified
  // together in Ganglia as a single metric group
  registerGauge("inputRate-total", _.inputRowsPerSecond, 0.0)
  registerGauge("processingRate-total", _.processedRowsPerSecond, 0.0)
  registerGauge("latency", _.durationMs.get("triggerExecution").longValue(), 0L)

  private def registerGauge[T](
      name: String,
      f: StreamingQueryProgress => T,
      default: T): Unit = {
    synchronized {
      metricRegistry.register(name, new Gauge[T] {
        override def getValue: T = Option(stream.lastProgress).map(f).getOrElse(default)
      })
    }
  }
}
Example 17
Source File: MasterSource.scala From Spark-2.3.1 with Apache License 2.0
package org.apache.spark.deploy.master

import com.codahale.metrics.{Gauge, MetricRegistry}

import org.apache.spark.metrics.source.Source

private[spark] class MasterSource(val master: Master) extends Source {
  override val metricRegistry = new MetricRegistry()
  override val sourceName = "master"

  // Gauge for worker numbers in cluster
  metricRegistry.register(MetricRegistry.name("workers"), new Gauge[Int] {
    override def getValue: Int = master.workers.size
  })

  // Gauge for alive worker numbers in cluster
  metricRegistry.register(MetricRegistry.name("aliveWorkers"), new Gauge[Int] {
    override def getValue: Int = master.workers.count(_.state == WorkerState.ALIVE)
  })

  // Gauge for application numbers in cluster
  metricRegistry.register(MetricRegistry.name("apps"), new Gauge[Int] {
    override def getValue: Int = master.apps.size
  })

  // Gauge for waiting application numbers in cluster
  metricRegistry.register(MetricRegistry.name("waitingApps"), new Gauge[Int] {
    override def getValue: Int = master.apps.count(_.state == ApplicationState.WAITING)
  })
}
Example 18
Source File: WorkerSource.scala From Spark-2.3.1 with Apache License 2.0
package org.apache.spark.deploy.worker

import com.codahale.metrics.{Gauge, MetricRegistry}

import org.apache.spark.metrics.source.Source

private[worker] class WorkerSource(val worker: Worker) extends Source {
  override val sourceName = "worker"
  override val metricRegistry = new MetricRegistry()

  metricRegistry.register(MetricRegistry.name("executors"), new Gauge[Int] {
    override def getValue: Int = worker.executors.size
  })

  // Gauge for cores used of this worker
  metricRegistry.register(MetricRegistry.name("coresUsed"), new Gauge[Int] {
    override def getValue: Int = worker.coresUsed
  })

  // Gauge for memory used of this worker
  metricRegistry.register(MetricRegistry.name("memUsed_MB"), new Gauge[Int] {
    override def getValue: Int = worker.memoryUsed
  })

  // Gauge for cores free of this worker
  metricRegistry.register(MetricRegistry.name("coresFree"), new Gauge[Int] {
    override def getValue: Int = worker.coresFree
  })

  // Gauge for memory free of this worker
  metricRegistry.register(MetricRegistry.name("memFree_MB"), new Gauge[Int] {
    override def getValue: Int = worker.memoryFree
  })
}
Example 19
Source File: DAGSchedulerSource.scala From Spark-2.3.1 with Apache License 2.0
package org.apache.spark.scheduler

import com.codahale.metrics.{Gauge, MetricRegistry, Timer}

import org.apache.spark.metrics.source.Source

private[scheduler] class DAGSchedulerSource(val dagScheduler: DAGScheduler)
  extends Source {

  override val metricRegistry = new MetricRegistry()
  override val sourceName = "DAGScheduler"

  metricRegistry.register(MetricRegistry.name("stage", "failedStages"), new Gauge[Int] {
    override def getValue: Int = dagScheduler.failedStages.size
  })

  metricRegistry.register(MetricRegistry.name("stage", "runningStages"), new Gauge[Int] {
    override def getValue: Int = dagScheduler.runningStages.size
  })

  metricRegistry.register(MetricRegistry.name("stage", "waitingStages"), new Gauge[Int] {
    override def getValue: Int = dagScheduler.waitingStages.size
  })

  metricRegistry.register(MetricRegistry.name("job", "allJobs"), new Gauge[Int] {
    override def getValue: Int = dagScheduler.numTotalJobs
  })

  metricRegistry.register(MetricRegistry.name("job", "activeJobs"), new Gauge[Int] {
    override def getValue: Int = dagScheduler.activeJobs.size
  })

  val messageProcessingTimer: Timer =
    metricRegistry.timer(MetricRegistry.name("messageProcessingTime"))
}
Example 20
Source File: MasterSource.scala From BigDatalog with Apache License 2.0
package org.apache.spark.deploy.master

import com.codahale.metrics.{Gauge, MetricRegistry}

import org.apache.spark.metrics.source.Source

private[spark] class MasterSource(val master: Master) extends Source {
  override val metricRegistry = new MetricRegistry()
  override val sourceName = "master"

  // Gauge for worker numbers in cluster
  metricRegistry.register(MetricRegistry.name("workers"), new Gauge[Int] {
    override def getValue: Int = master.workers.size
  })

  // Gauge for alive worker numbers in cluster
  metricRegistry.register(MetricRegistry.name("aliveWorkers"), new Gauge[Int] {
    override def getValue: Int = master.workers.filter(_.state == WorkerState.ALIVE).size
  })

  // Gauge for application numbers in cluster
  metricRegistry.register(MetricRegistry.name("apps"), new Gauge[Int] {
    override def getValue: Int = master.apps.size
  })

  // Gauge for waiting application numbers in cluster
  metricRegistry.register(MetricRegistry.name("waitingApps"), new Gauge[Int] {
    override def getValue: Int = master.waitingApps.size
  })
}
Example 21
Source File: WorkerSource.scala From BigDatalog with Apache License 2.0
package org.apache.spark.deploy.worker

import com.codahale.metrics.{Gauge, MetricRegistry}

import org.apache.spark.metrics.source.Source

private[worker] class WorkerSource(val worker: Worker) extends Source {
  override val sourceName = "worker"
  override val metricRegistry = new MetricRegistry()

  metricRegistry.register(MetricRegistry.name("executors"), new Gauge[Int] {
    override def getValue: Int = worker.executors.size
  })

  // Gauge for cores used of this worker
  metricRegistry.register(MetricRegistry.name("coresUsed"), new Gauge[Int] {
    override def getValue: Int = worker.coresUsed
  })

  // Gauge for memory used of this worker
  metricRegistry.register(MetricRegistry.name("memUsed_MB"), new Gauge[Int] {
    override def getValue: Int = worker.memoryUsed
  })

  // Gauge for cores free of this worker
  metricRegistry.register(MetricRegistry.name("coresFree"), new Gauge[Int] {
    override def getValue: Int = worker.coresFree
  })

  // Gauge for memory free of this worker
  metricRegistry.register(MetricRegistry.name("memFree_MB"), new Gauge[Int] {
    override def getValue: Int = worker.memoryFree
  })
}
Example 22
Source File: DAGSchedulerSource.scala From BigDatalog with Apache License 2.0
package org.apache.spark.scheduler

import com.codahale.metrics.{Gauge, MetricRegistry, Timer}

import org.apache.spark.metrics.source.Source

private[scheduler] class DAGSchedulerSource(val dagScheduler: DAGScheduler)
  extends Source {

  override val metricRegistry = new MetricRegistry()
  override val sourceName = "DAGScheduler"

  metricRegistry.register(MetricRegistry.name("stage", "failedStages"), new Gauge[Int] {
    override def getValue: Int = dagScheduler.failedStages.size
  })

  metricRegistry.register(MetricRegistry.name("stage", "runningStages"), new Gauge[Int] {
    override def getValue: Int = dagScheduler.runningStages.size
  })

  metricRegistry.register(MetricRegistry.name("stage", "waitingStages"), new Gauge[Int] {
    override def getValue: Int = dagScheduler.waitingStages.size
  })

  metricRegistry.register(MetricRegistry.name("job", "allJobs"), new Gauge[Int] {
    override def getValue: Int = dagScheduler.numTotalJobs
  })

  metricRegistry.register(MetricRegistry.name("job", "activeJobs"), new Gauge[Int] {
    override def getValue: Int = dagScheduler.activeJobs.size
  })

  val messageProcessingTimer: Timer =
    metricRegistry.timer(MetricRegistry.name("messageProcessingTime"))
}
Example 23
Source File: MesosClusterSchedulerSource.scala From BigDatalog with Apache License 2.0
package org.apache.spark.scheduler.cluster.mesos

import com.codahale.metrics.{Gauge, MetricRegistry}

import org.apache.spark.metrics.source.Source

private[mesos] class MesosClusterSchedulerSource(scheduler: MesosClusterScheduler)
  extends Source {

  override def sourceName: String = "mesos_cluster"
  override def metricRegistry: MetricRegistry = new MetricRegistry()

  metricRegistry.register(MetricRegistry.name("waitingDrivers"), new Gauge[Int] {
    override def getValue: Int = scheduler.getQueuedDriversSize
  })

  metricRegistry.register(MetricRegistry.name("launchedDrivers"), new Gauge[Int] {
    override def getValue: Int = scheduler.getLaunchedDriversSize
  })

  metricRegistry.register(MetricRegistry.name("retryDrivers"), new Gauge[Int] {
    override def getValue: Int = scheduler.getPendingRetryDriversSize
  })
}
Example 24
Source File: BlockManagerSource.scala From BigDatalog with Apache License 2.0
package org.apache.spark.storage

import com.codahale.metrics.{Gauge, MetricRegistry}

import org.apache.spark.metrics.source.Source

private[spark] class BlockManagerSource(val blockManager: BlockManager)
  extends Source {

  override val metricRegistry = new MetricRegistry()
  override val sourceName = "BlockManager"

  metricRegistry.register(MetricRegistry.name("memory", "maxMem_MB"), new Gauge[Long] {
    override def getValue: Long = {
      val storageStatusList = blockManager.master.getStorageStatus
      val maxMem = storageStatusList.map(_.maxMem).sum
      maxMem / 1024 / 1024
    }
  })

  metricRegistry.register(MetricRegistry.name("memory", "remainingMem_MB"), new Gauge[Long] {
    override def getValue: Long = {
      val storageStatusList = blockManager.master.getStorageStatus
      val remainingMem = storageStatusList.map(_.memRemaining).sum
      remainingMem / 1024 / 1024
    }
  })

  metricRegistry.register(MetricRegistry.name("memory", "memUsed_MB"), new Gauge[Long] {
    override def getValue: Long = {
      val storageStatusList = blockManager.master.getStorageStatus
      val memUsed = storageStatusList.map(_.memUsed).sum
      memUsed / 1024 / 1024
    }
  })

  metricRegistry.register(MetricRegistry.name("disk", "diskSpaceUsed_MB"), new Gauge[Long] {
    override def getValue: Long = {
      val storageStatusList = blockManager.master.getStorageStatus
      val diskSpaceUsed = storageStatusList.map(_.diskUsed).sum
      diskSpaceUsed / 1024 / 1024
    }
  })
}
Example 25
Source File: ExecutorSource.scala From BigDatalog with Apache License 2.0
package org.apache.spark.executor

import java.util.concurrent.ThreadPoolExecutor

import scala.collection.JavaConverters._

import com.codahale.metrics.{Gauge, MetricRegistry}
import org.apache.hadoop.fs.FileSystem

import org.apache.spark.metrics.source.Source

private[spark] class ExecutorSource(threadPool: ThreadPoolExecutor, executorId: String)
  extends Source {

  private def fileStats(scheme: String): Option[FileSystem.Statistics] =
    FileSystem.getAllStatistics.asScala.find(s => s.getScheme.equals(scheme))

  private def registerFileSystemStat[T](
      scheme: String, name: String, f: FileSystem.Statistics => T, defaultValue: T) = {
    metricRegistry.register(MetricRegistry.name("filesystem", scheme, name), new Gauge[T] {
      override def getValue: T = fileStats(scheme).map(f).getOrElse(defaultValue)
    })
  }

  override val metricRegistry = new MetricRegistry()
  override val sourceName = "executor"

  // Gauge for executor thread pool's actively executing task counts
  metricRegistry.register(MetricRegistry.name("threadpool", "activeTasks"), new Gauge[Int] {
    override def getValue: Int = threadPool.getActiveCount()
  })

  // Gauge for executor thread pool's approximate total number of tasks that have been completed
  metricRegistry.register(MetricRegistry.name("threadpool", "completeTasks"), new Gauge[Long] {
    override def getValue: Long = threadPool.getCompletedTaskCount()
  })

  // Gauge for executor thread pool's current number of threads
  metricRegistry.register(MetricRegistry.name("threadpool", "currentPool_size"), new Gauge[Int] {
    override def getValue: Int = threadPool.getPoolSize()
  })

  // Gauge for executor thread pool's maximum allowed number of threads
  metricRegistry.register(MetricRegistry.name("threadpool", "maxPool_size"), new Gauge[Int] {
    override def getValue: Int = threadPool.getMaximumPoolSize()
  })

  // Gauges for file system stats of this executor
  for (scheme <- Array("hdfs", "file")) {
    registerFileSystemStat(scheme, "read_bytes", _.getBytesRead(), 0L)
    registerFileSystemStat(scheme, "write_bytes", _.getBytesWritten(), 0L)
    registerFileSystemStat(scheme, "read_ops", _.getReadOps(), 0)
    registerFileSystemStat(scheme, "largeRead_ops", _.getLargeReadOps(), 0)
    registerFileSystemStat(scheme, "write_ops", _.getWriteOps(), 0)
  }
}
Example 26
Source File: OffsetGraphiteReporter.scala From kafka-offset-monitor-graphite with Apache License 2.0
package pl.allegro.tech.kafka.offset.monitor.graphite

import java.net.InetSocketAddress
import java.util.concurrent.TimeUnit

import com.codahale.metrics.{Gauge, MetricFilter, MetricRegistry}
import com.codahale.metrics.graphite.{Graphite, GraphiteReporter}
import com.google.common.cache._
import com.quantifind.kafka.OffsetGetter.OffsetInfo

class OffsetGraphiteReporter(pluginsArgs: String)
  extends com.quantifind.kafka.offsetapp.OffsetInfoReporter {

  GraphiteReporterArguments.parseArguments(pluginsArgs)

  val metrics: MetricRegistry = new MetricRegistry()

  val graphite: Graphite = new Graphite(new InetSocketAddress(
    GraphiteReporterArguments.graphiteHost, GraphiteReporterArguments.graphitePort))

  val reporter: GraphiteReporter = GraphiteReporter.forRegistry(metrics)
    .prefixedWith(GraphiteReporterArguments.graphitePrefix)
    .convertRatesTo(TimeUnit.SECONDS)
    .convertDurationsTo(TimeUnit.MILLISECONDS)
    .filter(MetricFilter.ALL)
    .build(graphite)

  reporter.start(GraphiteReporterArguments.graphiteReportPeriod, TimeUnit.SECONDS)

  val removalListener: RemovalListener[String, GaugesValues] =
    new RemovalListener[String, GaugesValues] {
      override def onRemoval(removalNotification: RemovalNotification[String, GaugesValues]) = {
        metrics.remove(removalNotification.getKey() + ".offset")
        metrics.remove(removalNotification.getKey() + ".logSize")
        metrics.remove(removalNotification.getKey() + ".lag")
      }
    }

  val gauges: LoadingCache[String, GaugesValues] = CacheBuilder.newBuilder()
    .expireAfterAccess(GraphiteReporterArguments.metricsCacheExpireSeconds, TimeUnit.SECONDS)
    .removalListener(removalListener)
    .build(
      new CacheLoader[String, GaugesValues]() {
        def load(key: String): GaugesValues = {
          val values: GaugesValues = new GaugesValues()

          val offsetGauge: Gauge[Long] = new Gauge[Long] {
            override def getValue: Long = values.offset
          }

          val lagGauge: Gauge[Long] = new Gauge[Long] {
            override def getValue: Long = values.lag
          }

          val logSizeGauge: Gauge[Long] = new Gauge[Long] {
            override def getValue: Long = values.logSize
          }

          metrics.register(key + ".offset", offsetGauge)
          metrics.register(key + ".logSize", logSizeGauge)
          metrics.register(key + ".lag", lagGauge)

          values
        }
      }
    )

  override def report(info: scala.IndexedSeq[OffsetInfo]) = {
    info.foreach(i => {
      val values: GaugesValues = gauges.get(getMetricName(i))
      values.logSize = i.logSize
      values.offset = i.offset
      values.lag = i.lag
    })
  }

  def getMetricName(offsetInfo: OffsetInfo): String = {
    offsetInfo.topic.replace(".", "_") + "." +
      offsetInfo.group.replace(".", "_") + "." +
      offsetInfo.partition
  }
}
Example 27
Source File: BlockManagerSource.scala From sparkoscope with Apache License 2.0
package org.apache.spark.storage

import com.codahale.metrics.{Gauge, MetricRegistry}

import org.apache.spark.metrics.source.Source

private[spark] class BlockManagerSource(val blockManager: BlockManager)
  extends Source {

  override val metricRegistry = new MetricRegistry()
  override val sourceName = "BlockManager"

  metricRegistry.register(MetricRegistry.name("memory", "maxMem_MB"), new Gauge[Long] {
    override def getValue: Long = {
      val storageStatusList = blockManager.master.getStorageStatus
      val maxMem = storageStatusList.map(_.maxMem).sum
      maxMem / 1024 / 1024
    }
  })

  metricRegistry.register(MetricRegistry.name("memory", "remainingMem_MB"), new Gauge[Long] {
    override def getValue: Long = {
      val storageStatusList = blockManager.master.getStorageStatus
      val remainingMem = storageStatusList.map(_.memRemaining).sum
      remainingMem / 1024 / 1024
    }
  })

  metricRegistry.register(MetricRegistry.name("memory", "memUsed_MB"), new Gauge[Long] {
    override def getValue: Long = {
      val storageStatusList = blockManager.master.getStorageStatus
      val memUsed = storageStatusList.map(_.memUsed).sum
      memUsed / 1024 / 1024
    }
  })

  metricRegistry.register(MetricRegistry.name("disk", "diskSpaceUsed_MB"), new Gauge[Long] {
    override def getValue: Long = {
      val storageStatusList = blockManager.master.getStorageStatus
      val diskSpaceUsed = storageStatusList.map(_.diskUsed).sum
      diskSpaceUsed / 1024 / 1024
    }
  })
}
Example 28
Source File: MesosClusterSchedulerSource.scala From drizzle-spark with Apache License 2.0
package org.apache.spark.scheduler.cluster.mesos

import com.codahale.metrics.{Gauge, MetricRegistry}

import org.apache.spark.metrics.source.Source

private[mesos] class MesosClusterSchedulerSource(scheduler: MesosClusterScheduler)
  extends Source {

  override def sourceName: String = "mesos_cluster"
  override def metricRegistry: MetricRegistry = new MetricRegistry()

  metricRegistry.register(MetricRegistry.name("waitingDrivers"), new Gauge[Int] {
    override def getValue: Int = scheduler.getQueuedDriversSize
  })

  metricRegistry.register(MetricRegistry.name("launchedDrivers"), new Gauge[Int] {
    override def getValue: Int = scheduler.getLaunchedDriversSize
  })

  metricRegistry.register(MetricRegistry.name("retryDrivers"), new Gauge[Int] {
    override def getValue: Int = scheduler.getPendingRetryDriversSize
  })
}
Example 29
Source File: MasterSource.scala From drizzle-spark with Apache License 2.0
package org.apache.spark.deploy.master

import com.codahale.metrics.{Gauge, MetricRegistry}

import org.apache.spark.metrics.source.Source

private[spark] class MasterSource(val master: Master) extends Source {
  override val metricRegistry = new MetricRegistry()
  override val sourceName = "master"

  // Gauge for worker numbers in cluster
  metricRegistry.register(MetricRegistry.name("workers"), new Gauge[Int] {
    override def getValue: Int = master.workers.size
  })

  // Gauge for alive worker numbers in cluster
  metricRegistry.register(MetricRegistry.name("aliveWorkers"), new Gauge[Int] {
    override def getValue: Int = master.workers.count(_.state == WorkerState.ALIVE)
  })

  // Gauge for application numbers in cluster
  metricRegistry.register(MetricRegistry.name("apps"), new Gauge[Int] {
    override def getValue: Int = master.apps.size
  })

  // Gauge for waiting application numbers in cluster
  metricRegistry.register(MetricRegistry.name("waitingApps"), new Gauge[Int] {
    override def getValue: Int = master.apps.count(_.state == ApplicationState.WAITING)
  })
}
Example 30
Source File: WorkerSource.scala From drizzle-spark with Apache License 2.0
package org.apache.spark.deploy.worker

import com.codahale.metrics.{Gauge, MetricRegistry}

import org.apache.spark.metrics.source.Source

private[worker] class WorkerSource(val worker: Worker) extends Source {
  override val sourceName = "worker"
  override val metricRegistry = new MetricRegistry()

  metricRegistry.register(MetricRegistry.name("executors"), new Gauge[Int] {
    override def getValue: Int = worker.executors.size
  })

  // Gauge for cores used of this worker
  metricRegistry.register(MetricRegistry.name("coresUsed"), new Gauge[Int] {
    override def getValue: Int = worker.coresUsed
  })

  // Gauge for memory used of this worker
  metricRegistry.register(MetricRegistry.name("memUsed_MB"), new Gauge[Int] {
    override def getValue: Int = worker.memoryUsed
  })

  // Gauge for cores free of this worker
  metricRegistry.register(MetricRegistry.name("coresFree"), new Gauge[Int] {
    override def getValue: Int = worker.coresFree
  })

  // Gauge for memory free of this worker
  metricRegistry.register(MetricRegistry.name("memFree_MB"), new Gauge[Int] {
    override def getValue: Int = worker.memoryFree
  })
}
Example 31
Source File: DAGSchedulerSource.scala From drizzle-spark with Apache License 2.0
package org.apache.spark.scheduler

import com.codahale.metrics.{Gauge, MetricRegistry, Timer}

import org.apache.spark.metrics.source.Source

private[scheduler] class DAGSchedulerSource(val dagScheduler: DAGScheduler)
  extends Source {

  override val metricRegistry = new MetricRegistry()
  override val sourceName = "DAGScheduler"

  metricRegistry.register(MetricRegistry.name("stage", "failedStages"), new Gauge[Int] {
    override def getValue: Int = dagScheduler.failedStages.size
  })

  metricRegistry.register(MetricRegistry.name("stage", "runningStages"), new Gauge[Int] {
    override def getValue: Int = dagScheduler.runningStages.size
  })

  metricRegistry.register(MetricRegistry.name("stage", "waitingStages"), new Gauge[Int] {
    override def getValue: Int = dagScheduler.waitingStages.size
  })

  metricRegistry.register(MetricRegistry.name("job", "allJobs"), new Gauge[Int] {
    override def getValue: Int = dagScheduler.numTotalJobs
  })

  metricRegistry.register(MetricRegistry.name("job", "activeJobs"), new Gauge[Int] {
    override def getValue: Int = dagScheduler.activeJobs.size
  })

  val messageProcessingTimer: Timer =
    metricRegistry.timer(MetricRegistry.name("messageProcessingTime"))
}
Example 32
Source File: BlockManagerSource.scala From drizzle-spark with Apache License 2.0
package org.apache.spark.storage

import com.codahale.metrics.{Gauge, MetricRegistry}

import org.apache.spark.metrics.source.Source

private[spark] class BlockManagerSource(val blockManager: BlockManager)
  extends Source {

  override val metricRegistry = new MetricRegistry()
  override val sourceName = "BlockManager"

  metricRegistry.register(MetricRegistry.name("memory", "maxMem_MB"), new Gauge[Long] {
    override def getValue: Long = {
      val storageStatusList = blockManager.master.getStorageStatus
      val maxMem = storageStatusList.map(_.maxMem).sum
      maxMem / 1024 / 1024
    }
  })

  metricRegistry.register(MetricRegistry.name("memory", "remainingMem_MB"), new Gauge[Long] {
    override def getValue: Long = {
      val storageStatusList = blockManager.master.getStorageStatus
      val remainingMem = storageStatusList.map(_.memRemaining).sum
      remainingMem / 1024 / 1024
    }
  })

  metricRegistry.register(MetricRegistry.name("memory", "memUsed_MB"), new Gauge[Long] {
    override def getValue: Long = {
      val storageStatusList = blockManager.master.getStorageStatus
      val memUsed = storageStatusList.map(_.memUsed).sum
      memUsed / 1024 / 1024
    }
  })

  metricRegistry.register(MetricRegistry.name("disk", "diskSpaceUsed_MB"), new Gauge[Long] {
    override def getValue: Long = {
      val storageStatusList = blockManager.master.getStorageStatus
      val diskSpaceUsed = storageStatusList.map(_.diskUsed).sum
      diskSpaceUsed / 1024 / 1024
    }
  })
}
Example 33
Source File: ExecutorSource.scala From drizzle-spark with Apache License 2.0
package org.apache.spark.executor

import java.util.concurrent.ThreadPoolExecutor

import scala.collection.JavaConverters._

import com.codahale.metrics.{Gauge, MetricRegistry}
import org.apache.hadoop.fs.FileSystem

import org.apache.spark.metrics.source.Source

private[spark] class ExecutorSource(threadPool: ThreadPoolExecutor, executorId: String)
  extends Source {

  private def fileStats(scheme: String): Option[FileSystem.Statistics] =
    FileSystem.getAllStatistics.asScala.find(s => s.getScheme.equals(scheme))

  private def registerFileSystemStat[T](
      scheme: String, name: String, f: FileSystem.Statistics => T, defaultValue: T) = {
    metricRegistry.register(MetricRegistry.name("filesystem", scheme, name), new Gauge[T] {
      override def getValue: T = fileStats(scheme).map(f).getOrElse(defaultValue)
    })
  }

  override val metricRegistry = new MetricRegistry()
  override val sourceName = "executor"

  // Gauge for executor thread pool's actively executing task counts
  metricRegistry.register(MetricRegistry.name("threadpool", "activeTasks"), new Gauge[Int] {
    override def getValue: Int = threadPool.getActiveCount()
  })

  // Gauge for executor thread pool's approximate total number of tasks that have been completed
  metricRegistry.register(MetricRegistry.name("threadpool", "completeTasks"), new Gauge[Long] {
    override def getValue: Long = threadPool.getCompletedTaskCount()
  })

  // Gauge for executor thread pool's current number of threads
  metricRegistry.register(MetricRegistry.name("threadpool", "currentPool_size"), new Gauge[Int] {
    override def getValue: Int = threadPool.getPoolSize()
  })

  // Gauge for executor thread pool's maximum allowed number of threads
  metricRegistry.register(MetricRegistry.name("threadpool", "maxPool_size"), new Gauge[Int] {
    override def getValue: Int = threadPool.getMaximumPoolSize()
  })

  // Gauges for file system stats of this executor
  for (scheme <- Array("hdfs", "file")) {
    registerFileSystemStat(scheme, "read_bytes", _.getBytesRead(), 0L)
    registerFileSystemStat(scheme, "write_bytes", _.getBytesWritten(), 0L)
    registerFileSystemStat(scheme, "read_ops", _.getReadOps(), 0)
    registerFileSystemStat(scheme, "largeRead_ops", _.getLargeReadOps(), 0)
    registerFileSystemStat(scheme, "write_ops", _.getWriteOps(), 0)
  }
}
Example 34
Source File: MetricsReporter.scala From XSQL with Apache License 2.0
package org.apache.spark.sql.execution.streaming

import java.text.SimpleDateFormat

import com.codahale.metrics.{Gauge, MetricRegistry}

import org.apache.spark.internal.Logging
import org.apache.spark.metrics.source.{Source => CodahaleSource}
import org.apache.spark.sql.catalyst.util.DateTimeUtils
import org.apache.spark.sql.streaming.StreamingQueryProgress

class MetricsReporter(
    stream: StreamExecution,
    override val sourceName: String) extends CodahaleSource with Logging {

  override val metricRegistry: MetricRegistry = new MetricRegistry

  // Metric names should not have . in them, so that all the metrics of a query are identified
  // together in Ganglia as a single metric group
  registerGauge("inputRate-total", _.inputRowsPerSecond, 0.0)
  registerGauge("processingRate-total", _.processedRowsPerSecond, 0.0)
  registerGauge("latency", _.durationMs.get("triggerExecution").longValue(), 0L)

  private val timestampFormat = new SimpleDateFormat("yyyy-MM-dd'T'HH:mm:ss.SSS'Z'") // ISO8601
  timestampFormat.setTimeZone(DateTimeUtils.getTimeZone("UTC"))

  registerGauge("eventTime-watermark",
    progress => convertStringDateToMillis(progress.eventTime.get("watermark")), 0L)

  registerGauge("states-rowsTotal", _.stateOperators.map(_.numRowsTotal).sum, 0L)
  registerGauge("states-usedBytes", _.stateOperators.map(_.memoryUsedBytes).sum, 0L)

  private def convertStringDateToMillis(isoUtcDateStr: String) = {
    if (isoUtcDateStr != null) {
      timestampFormat.parse(isoUtcDateStr).getTime
    } else {
      0L
    }
  }

  private def registerGauge[T](
      name: String,
      f: StreamingQueryProgress => T,
      default: T): Unit = {
    synchronized {
      metricRegistry.register(name, new Gauge[T] {
        override def getValue: T = Option(stream.lastProgress).map(f).getOrElse(default)
      })
    }
  }
}
Example 35
Source File: MesosClusterSchedulerSource.scala From sparkoscope with Apache License 2.0
package org.apache.spark.scheduler.cluster.mesos

import com.codahale.metrics.{Gauge, MetricRegistry}

import org.apache.spark.metrics.source.Source

private[mesos] class MesosClusterSchedulerSource(scheduler: MesosClusterScheduler)
  extends Source {

  override def sourceName: String = "mesos_cluster"
  override def metricRegistry: MetricRegistry = new MetricRegistry()

  metricRegistry.register(MetricRegistry.name("waitingDrivers"), new Gauge[Int] {
    override def getValue: Int = scheduler.getQueuedDriversSize
  })

  metricRegistry.register(MetricRegistry.name("launchedDrivers"), new Gauge[Int] {
    override def getValue: Int = scheduler.getLaunchedDriversSize
  })

  metricRegistry.register(MetricRegistry.name("retryDrivers"), new Gauge[Int] {
    override def getValue: Int = scheduler.getPendingRetryDriversSize
  })
}
Example 36
Source File: MetricsReporter.scala From sparkoscope with Apache License 2.0
package org.apache.spark.sql.execution.streaming

import java.{util => ju}

import scala.collection.mutable

import com.codahale.metrics.{Gauge, MetricRegistry}

import org.apache.spark.internal.Logging
import org.apache.spark.metrics.source.{Source => CodahaleSource}
import org.apache.spark.util.Clock

class MetricsReporter(
    stream: StreamExecution,
    override val sourceName: String) extends CodahaleSource with Logging {

  override val metricRegistry: MetricRegistry = new MetricRegistry

  // Metric names should not have . in them, so that all the metrics of a query are identified
  // together in Ganglia as a single metric group
  registerGauge("inputRate-total", () => stream.lastProgress.inputRowsPerSecond)
  registerGauge("processingRate-total", () => stream.lastProgress.processedRowsPerSecond)
  registerGauge("latency",
    () => stream.lastProgress.durationMs.get("triggerExecution").longValue())

  private def registerGauge[T](name: String, f: () => T)(implicit num: Numeric[T]): Unit = {
    synchronized {
      metricRegistry.register(name, new Gauge[T] {
        override def getValue: T = f()
      })
    }
  }
}
Example 37
Source File: MasterSource.scala From sparkoscope with Apache License 2.0
package org.apache.spark.deploy.master

import com.codahale.metrics.{Gauge, MetricRegistry}

import org.apache.spark.metrics.source.Source

private[spark] class MasterSource(val master: Master) extends Source {
  override val metricRegistry = new MetricRegistry()
  override val sourceName = "master"

  // Gauge for worker numbers in cluster
  metricRegistry.register(MetricRegistry.name("workers"), new Gauge[Int] {
    override def getValue: Int = master.workers.size
  })

  // Gauge for alive worker numbers in cluster
  metricRegistry.register(MetricRegistry.name("aliveWorkers"), new Gauge[Int] {
    override def getValue: Int = master.workers.count(_.state == WorkerState.ALIVE)
  })

  // Gauge for application numbers in cluster
  metricRegistry.register(MetricRegistry.name("apps"), new Gauge[Int] {
    override def getValue: Int = master.apps.size
  })

  // Gauge for waiting application numbers in cluster
  metricRegistry.register(MetricRegistry.name("waitingApps"), new Gauge[Int] {
    override def getValue: Int = master.apps.count(_.state == ApplicationState.WAITING)
  })
}
Example 38
Source File: WorkerSource.scala From sparkoscope with Apache License 2.0
package org.apache.spark.deploy.worker

import com.codahale.metrics.{Gauge, MetricRegistry}

import org.apache.spark.metrics.source.Source

private[worker] class WorkerSource(val worker: Worker) extends Source {
  override val sourceName = "worker"
  override val metricRegistry = new MetricRegistry()

  metricRegistry.register(MetricRegistry.name("executors"), new Gauge[Int] {
    override def getValue: Int = worker.executors.size
  })

  // Gauge for cores used of this worker
  metricRegistry.register(MetricRegistry.name("coresUsed"), new Gauge[Int] {
    override def getValue: Int = worker.coresUsed
  })

  // Gauge for memory used of this worker
  metricRegistry.register(MetricRegistry.name("memUsed_MB"), new Gauge[Int] {
    override def getValue: Int = worker.memoryUsed
  })

  // Gauge for cores free of this worker
  metricRegistry.register(MetricRegistry.name("coresFree"), new Gauge[Int] {
    override def getValue: Int = worker.coresFree
  })

  // Gauge for memory free of this worker
  metricRegistry.register(MetricRegistry.name("memFree_MB"), new Gauge[Int] {
    override def getValue: Int = worker.memoryFree
  })
}
Example 39
Source File: DAGSchedulerSource.scala From sparkoscope with Apache License 2.0
package org.apache.spark.scheduler

import com.codahale.metrics.{Gauge, MetricRegistry, Timer}

import org.apache.spark.metrics.source.Source

private[scheduler] class DAGSchedulerSource(val dagScheduler: DAGScheduler)
  extends Source {

  override val metricRegistry = new MetricRegistry()
  override val sourceName = "DAGScheduler"

  metricRegistry.register(MetricRegistry.name("stage", "failedStages"), new Gauge[Int] {
    override def getValue: Int = dagScheduler.failedStages.size
  })

  metricRegistry.register(MetricRegistry.name("stage", "runningStages"), new Gauge[Int] {
    override def getValue: Int = dagScheduler.runningStages.size
  })

  metricRegistry.register(MetricRegistry.name("stage", "waitingStages"), new Gauge[Int] {
    override def getValue: Int = dagScheduler.waitingStages.size
  })

  metricRegistry.register(MetricRegistry.name("job", "allJobs"), new Gauge[Int] {
    override def getValue: Int = dagScheduler.numTotalJobs
  })

  metricRegistry.register(MetricRegistry.name("job", "activeJobs"), new Gauge[Int] {
    override def getValue: Int = dagScheduler.activeJobs.size
  })

  val messageProcessingTimer: Timer =
    metricRegistry.timer(MetricRegistry.name("messageProcessingTime"))
}
Example 40
Source File: CacheMetrics.scala From daml with Apache License 2.0
// Copyright (c) 2020 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
// SPDX-License-Identifier: Apache-2.0

package com.daml.metrics

import com.codahale.metrics.MetricRegistry.MetricSupplier
import com.codahale.metrics.{Counter, Gauge, MetricRegistry, Timer}

final class CacheMetrics(
    registry: MetricRegistry,
    prefix: MetricName,
) {
  val hitCount: Counter = registry.counter(prefix :+ "hits")
  val missCount: Counter = registry.counter(prefix :+ "misses")
  val loadSuccessCount: Counter = registry.counter(prefix :+ "load_successes")
  val loadFailureCount: Counter = registry.counter(prefix :+ "load_failures")
  val totalLoadTime: Timer = registry.timer(prefix :+ "load_total_time")
  val evictionCount: Counter = registry.counter(prefix :+ "evictions")
  val evictionWeight: Counter = registry.counter(prefix :+ "evicted_weight")

  def registerSizeGauge(sizeGauge: Gauge[Long]): Unit =
    register(prefix :+ "size", () => sizeGauge)

  def registerWeightGauge(weightGauge: Gauge[Long]): Unit =
    register(prefix :+ "weight", () => weightGauge)

  private def register[T](name: MetricName, gaugeSupplier: MetricSupplier[Gauge[_]]): Gauge[T] = {
    registry.remove(name)
    registry.gauge(name, gaugeSupplier).asInstanceOf[Gauge[T]]
  }
}
Example 41
Source File: ExecutorSource.scala From sparkoscope with Apache License 2.0
package org.apache.spark.executor

import java.util.concurrent.ThreadPoolExecutor

import scala.collection.JavaConverters._

import com.codahale.metrics.{Gauge, MetricRegistry}
import org.apache.hadoop.fs.FileSystem

import org.apache.spark.metrics.source.Source

private[spark] class ExecutorSource(threadPool: ThreadPoolExecutor, executorId: String)
  extends Source {

  private def fileStats(scheme: String): Option[FileSystem.Statistics] =
    FileSystem.getAllStatistics.asScala.find(s => s.getScheme.equals(scheme))

  private def registerFileSystemStat[T](
      scheme: String, name: String, f: FileSystem.Statistics => T, defaultValue: T) = {
    metricRegistry.register(MetricRegistry.name("filesystem", scheme, name), new Gauge[T] {
      override def getValue: T = fileStats(scheme).map(f).getOrElse(defaultValue)
    })
  }

  override val metricRegistry = new MetricRegistry()
  override val sourceName = "executor"

  // Gauge for executor thread pool's actively executing task counts
  metricRegistry.register(MetricRegistry.name("threadpool", "activeTasks"), new Gauge[Int] {
    override def getValue: Int = threadPool.getActiveCount()
  })

  // Gauge for executor thread pool's approximate total number of tasks that have been completed
  metricRegistry.register(MetricRegistry.name("threadpool", "completeTasks"), new Gauge[Long] {
    override def getValue: Long = threadPool.getCompletedTaskCount()
  })

  // Gauge for executor thread pool's current number of threads
  metricRegistry.register(MetricRegistry.name("threadpool", "currentPool_size"), new Gauge[Int] {
    override def getValue: Int = threadPool.getPoolSize()
  })

  // Gauge for executor thread pool's maximum allowed number of threads
  metricRegistry.register(MetricRegistry.name("threadpool", "maxPool_size"), new Gauge[Int] {
    override def getValue: Int = threadPool.getMaximumPoolSize()
  })

  // Gauges for file system stats of this executor
  for (scheme <- Array("hdfs", "file")) {
    registerFileSystemStat(scheme, "read_bytes", _.getBytesRead(), 0L)
    registerFileSystemStat(scheme, "write_bytes", _.getBytesWritten(), 0L)
    registerFileSystemStat(scheme, "read_ops", _.getReadOps(), 0)
    registerFileSystemStat(scheme, "largeRead_ops", _.getLargeReadOps(), 0)
    registerFileSystemStat(scheme, "write_ops", _.getWriteOps(), 0)
  }
}
Example 42
Source File: MasterSource.scala From SparkCore with Apache License 2.0 | 5 votes |
package org.apache.spark.deploy.master

import com.codahale.metrics.{Gauge, MetricRegistry}

import org.apache.spark.metrics.source.Source

private[spark] class MasterSource(val master: Master) extends Source {
  override val metricRegistry = new MetricRegistry()
  override val sourceName = "master"

  // Gauge for worker numbers in cluster
  metricRegistry.register(MetricRegistry.name("workers"), new Gauge[Int] {
    override def getValue: Int = master.workers.size
  })

  // Gauge for application numbers in cluster
  metricRegistry.register(MetricRegistry.name("apps"), new Gauge[Int] {
    override def getValue: Int = master.apps.size
  })

  // Gauge for waiting application numbers in cluster
  metricRegistry.register(MetricRegistry.name("waitingApps"), new Gauge[Int] {
    override def getValue: Int = master.waitingApps.size
  })
}
Example 43
Source File: WorkerSource.scala From SparkCore with Apache License 2.0 | 5 votes |
package org.apache.spark.deploy.worker

import com.codahale.metrics.{Gauge, MetricRegistry}

import org.apache.spark.metrics.source.Source

private[spark] class WorkerSource(val worker: Worker) extends Source {
  override val sourceName = "worker"
  override val metricRegistry = new MetricRegistry()

  // Gauge for executor numbers of this worker
  metricRegistry.register(MetricRegistry.name("executors"), new Gauge[Int] {
    override def getValue: Int = worker.executors.size
  })

  // Gauge for cores used of this worker
  metricRegistry.register(MetricRegistry.name("coresUsed"), new Gauge[Int] {
    override def getValue: Int = worker.coresUsed
  })

  // Gauge for memory used of this worker
  metricRegistry.register(MetricRegistry.name("memUsed_MB"), new Gauge[Int] {
    override def getValue: Int = worker.memoryUsed
  })

  // Gauge for cores free of this worker
  metricRegistry.register(MetricRegistry.name("coresFree"), new Gauge[Int] {
    override def getValue: Int = worker.coresFree
  })

  // Gauge for memory free of this worker
  metricRegistry.register(MetricRegistry.name("memFree_MB"), new Gauge[Int] {
    override def getValue: Int = worker.memoryFree
  })
}
Example 44
Source File: DAGSchedulerSource.scala From SparkCore with Apache License 2.0 | 5 votes |
package org.apache.spark.scheduler

import com.codahale.metrics.{Gauge, MetricRegistry}

import org.apache.spark.SparkContext
import org.apache.spark.metrics.source.Source

private[spark] class DAGSchedulerSource(val dagScheduler: DAGScheduler)
    extends Source {
  override val metricRegistry = new MetricRegistry()
  override val sourceName = "DAGScheduler"

  metricRegistry.register(MetricRegistry.name("stage", "failedStages"), new Gauge[Int] {
    override def getValue: Int = dagScheduler.failedStages.size
  })

  metricRegistry.register(MetricRegistry.name("stage", "runningStages"), new Gauge[Int] {
    override def getValue: Int = dagScheduler.runningStages.size
  })

  metricRegistry.register(MetricRegistry.name("stage", "waitingStages"), new Gauge[Int] {
    override def getValue: Int = dagScheduler.waitingStages.size
  })

  metricRegistry.register(MetricRegistry.name("job", "allJobs"), new Gauge[Int] {
    override def getValue: Int = dagScheduler.numTotalJobs
  })

  metricRegistry.register(MetricRegistry.name("job", "activeJobs"), new Gauge[Int] {
    override def getValue: Int = dagScheduler.activeJobs.size
  })
}
Example 45
Source File: BlockManagerSource.scala From SparkCore with Apache License 2.0 | 5 votes |
package org.apache.spark.storage

import com.codahale.metrics.{Gauge, MetricRegistry}

import org.apache.spark.SparkContext
import org.apache.spark.metrics.source.Source

private[spark] class BlockManagerSource(val blockManager: BlockManager)
    extends Source {
  override val metricRegistry = new MetricRegistry()
  override val sourceName = "BlockManager"

  metricRegistry.register(MetricRegistry.name("memory", "maxMem_MB"), new Gauge[Long] {
    override def getValue: Long = {
      val storageStatusList = blockManager.master.getStorageStatus
      val maxMem = storageStatusList.map(_.maxMem).sum
      maxMem / 1024 / 1024
    }
  })

  metricRegistry.register(MetricRegistry.name("memory", "remainingMem_MB"), new Gauge[Long] {
    override def getValue: Long = {
      val storageStatusList = blockManager.master.getStorageStatus
      val remainingMem = storageStatusList.map(_.memRemaining).sum
      remainingMem / 1024 / 1024
    }
  })

  metricRegistry.register(MetricRegistry.name("memory", "memUsed_MB"), new Gauge[Long] {
    override def getValue: Long = {
      val storageStatusList = blockManager.master.getStorageStatus
      val memUsed = storageStatusList.map(_.memUsed).sum
      memUsed / 1024 / 1024
    }
  })

  metricRegistry.register(MetricRegistry.name("disk", "diskSpaceUsed_MB"), new Gauge[Long] {
    override def getValue: Long = {
      val storageStatusList = blockManager.master.getStorageStatus
      val diskSpaceUsed = storageStatusList.map(_.diskUsed).sum
      diskSpaceUsed / 1024 / 1024
    }
  })
}
Example 46
Source File: ExecutorSource.scala From SparkCore with Apache License 2.0 | 5 votes |
package org.apache.spark.executor

import scala.collection.JavaConversions._

import com.codahale.metrics.{Gauge, MetricRegistry}
import org.apache.hadoop.fs.FileSystem

import org.apache.spark.metrics.source.Source

private[spark] class ExecutorSource(val executor: Executor, executorId: String) extends Source {

  private def fileStats(scheme: String) : Option[FileSystem.Statistics] =
    FileSystem.getAllStatistics().filter(s => s.getScheme.equals(scheme)).headOption

  private def registerFileSystemStat[T](
        scheme: String, name: String, f: FileSystem.Statistics => T, defaultValue: T) = {
    metricRegistry.register(MetricRegistry.name("filesystem", scheme, name), new Gauge[T] {
      override def getValue: T = fileStats(scheme).map(f).getOrElse(defaultValue)
    })
  }

  override val metricRegistry = new MetricRegistry()

  override val sourceName = "executor"

  // Gauge for executor thread pool's actively executing task counts
  metricRegistry.register(MetricRegistry.name("threadpool", "activeTasks"), new Gauge[Int] {
    override def getValue: Int = executor.threadPool.getActiveCount()
  })

  // Gauge for executor thread pool's approximate total number of tasks that have been completed
  metricRegistry.register(MetricRegistry.name("threadpool", "completeTasks"), new Gauge[Long] {
    override def getValue: Long = executor.threadPool.getCompletedTaskCount()
  })

  // Gauge for executor thread pool's current number of threads
  metricRegistry.register(MetricRegistry.name("threadpool", "currentPool_size"), new Gauge[Int] {
    override def getValue: Int = executor.threadPool.getPoolSize()
  })

  // Gauge for executor thread pool's largest number of threads that have ever simultaneously
  // been in the pool
  metricRegistry.register(MetricRegistry.name("threadpool", "maxPool_size"), new Gauge[Int] {
    override def getValue: Int = executor.threadPool.getMaximumPoolSize()
  })

  // Gauge for file system stats of this executor
  for (scheme <- Array("hdfs", "file")) {
    registerFileSystemStat(scheme, "read_bytes", _.getBytesRead(), 0L)
    registerFileSystemStat(scheme, "write_bytes", _.getBytesWritten(), 0L)
    registerFileSystemStat(scheme, "read_ops", _.getReadOps(), 0)
    registerFileSystemStat(scheme, "largeRead_ops", _.getLargeReadOps(), 0)
    registerFileSystemStat(scheme, "write_ops", _.getWriteOps(), 0)
  }
}
Example 47
Source File: RegistryFactory.scala From bandar-log with Apache License 2.0 | 5 votes |
package com.aol.one.dwh.bandarlog.reporters

import com.aol.one.dwh.bandarlog.metrics.Metric
import com.aol.one.dwh.infra.util.LogTrait
import com.codahale.metrics.{Gauge, MetricRegistry}

object RegistryFactory extends LogTrait {

  def create(): MetricRegistry = {
    new MetricRegistry()
  }

  def createWithMetric[V](metric: Metric[V]): MetricRegistry = {
    val metricRegistry = create()
    metricRegistry.register(s"${metric.prefix}.${metric.name}", toGauge(metric))
    metricRegistry
  }

  private def toGauge[V](metric: Metric[V]): Gauge[V] = {
    new Gauge[V] {
      override def getValue: V = {
        // null values will be filtered and not reported
        metric.value.getValue.getOrElse(None.orNull.asInstanceOf[V])
      }
    }
  }
}
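The null trick in toGauge works because Dropwizard reporters generally skip gauges whose getValue returns null rather than reporting a default. A standalone sketch of the same idiom, independent of bandar-log's Metric type (the names here are illustrative):

import com.codahale.metrics.{Gauge, MetricRegistry}

def optionGauge[V](read: () => Option[V]): Gauge[V] = new Gauge[V] {
  override def getValue: V = read().getOrElse(None.orNull.asInstanceOf[V])
}

val registry = new MetricRegistry()
// Reference types degrade cleanly to null; primitives would be boxed on the way
// out, so prefer this idiom for reference-typed gauge values.
registry.register("example.hostname", optionGauge(() => Option(System.getenv("HOSTNAME"))))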
Example 48
Source File: ExportConsumerMetricsToRegistryActor.scala From remora with MIT License | 5 votes |
import KafkaClientActor.{Command, DescribeKafkaConsumerGroup, ListConsumers}
import akka.actor._
import akka.pattern.ask
import akka.stream.ActorMaterializer
import akka.util.Timeout
import com.codahale.metrics.Gauge
import models.RegistryKafkaMetric._
import models.{GroupInfo, RegistryKafkaMetric}
import nl.grons.metrics.scala.{ActorInstrumentedLifeCycle, ReceiveCounterActor, ReceiveExceptionMeterActor, ReceiveTimerActor}

import scala.concurrent.duration._
import scala.reflect.ClassTag

object ExportConsumerMetricsToRegistryActor {
  def props(kafkaClientActorRef: ActorRef)(implicit actorSystem: ActorSystem, materializer: ActorMaterializer) =
    Props(classOf[ExportConsumerMetricsToRegistryActor], kafkaClientActorRef, actorSystem, materializer)
}

class BaseExportConsumerMetricsToRegistryActor(kafkaClientActorRef: ActorRef)
                                              (implicit actorSystem: ActorSystem, materializer: ActorMaterializer)
  extends Actor
    with ActorLogging
    with nl.grons.metrics.scala.DefaultInstrumented
    with ActorInstrumentedLifeCycle {

  implicit val timeout = Timeout(60 seconds)
  implicit val apiExecutionContext = actorSystem.dispatchers.lookup("exporter-dispatcher")

  private def askFor[RES](command: Command)(implicit tag: ClassTag[RES]) =
    (kafkaClientActorRef ? command).mapTo[RES]

  def receive = {
    case _ =>
      log.info("Exporting lag info to metrics registry!")
      val consumerList = askFor[List[String]](ListConsumers)
      consumerList.map(_.foreach(consumerGroup => {
        val groupInfo = askFor[GroupInfo](DescribeKafkaConsumerGroup(consumerGroup))
        groupInfo.map { gi =>
          gi.partitionAssignmentStates.map(pa => {
            pa.map { p =>
              val offsetKey = encode(RegistryKafkaMetric("gauge", p.topic.get, p.partition.map(_.toString), p.group, "offset"))
              registerOrUpdateGauge(offsetKey, p.offset)

              val lagKey = encode(RegistryKafkaMetric("gauge", p.topic.get, p.partition.map(_.toString), p.group, "lag"))
              registerOrUpdateGauge(lagKey, p.lag)

              val logEndKey = encode(RegistryKafkaMetric("gauge", p.topic.get, p.partition.map(_.toString), p.group, "logend"))
              registerOrUpdateGauge(logEndKey, p.logEndOffset)
            }

            gi.lagPerTopic.map { lagPerTopic =>
              lagPerTopic.foreach { case (topic, totalLag) =>
                val lagKey = encode(RegistryKafkaMetric("gauge", topic, None, consumerGroup, "lag"))
                registerOrUpdateGauge(lagKey, Some(totalLag))
              }
            }
          })
        }
      }))
  }

  // Gauges aren't really meant to be used this way, but we don't want to cache the results.
  def registerOrUpdateGauge(gaugeName: String, value: Option[Long]) = {
    value match {
      case Some(v) =>
        metricRegistry.remove(gaugeName)
        metricRegistry.register(gaugeName, new Gauge[Long] {
          override def getValue: Long = v
        })
      case None => log.error(s"Gauge $gaugeName has None!")
    }
  }
}

class ExportConsumerMetricsToRegistryActor(kafkaClientActorRef: ActorRef)
                                          (implicit actorSystem: ActorSystem, materializer: ActorMaterializer)
  extends BaseExportConsumerMetricsToRegistryActor(kafkaClientActorRef)
    with ReceiveCounterActor
    with ReceiveTimerActor
    with ReceiveExceptionMeterActor
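The remove-then-register pattern in registerOrUpdateGauge is one common way to point a Dropwizard gauge at a new constant, since gauges cannot be updated in place. A minimal standalone sketch of the idiom (names are illustrative):

import com.codahale.metrics.{Gauge, MetricRegistry}

def setGauge(registry: MetricRegistry, name: String, value: Long): Unit = {
  registry.remove(name) // drop any previous gauge registered under this name
  registry.register(name, new Gauge[Long] {
    override def getValue: Long = value
  })
}

val registry = new MetricRegistry()
setGauge(registry, "consumer.lag", 120L)
setGauge(registry, "consumer.lag", 95L) // replaces the earlier gauge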
Example 49
Source File: MesosClusterSchedulerSource.scala From multi-tenancy-spark with Apache License 2.0 | 5 votes |
package org.apache.spark.scheduler.cluster.mesos

import com.codahale.metrics.{Gauge, MetricRegistry}

import org.apache.spark.metrics.source.Source

private[mesos] class MesosClusterSchedulerSource(scheduler: MesosClusterScheduler)
  extends Source {
  override def sourceName: String = "mesos_cluster"
  // Note: because metricRegistry is a def, each access below constructs a fresh
  // MetricRegistry, so every register call writes to its own throwaway instance.
  override def metricRegistry: MetricRegistry = new MetricRegistry()

  metricRegistry.register(MetricRegistry.name("waitingDrivers"), new Gauge[Int] {
    override def getValue: Int = scheduler.getQueuedDriversSize
  })

  metricRegistry.register(MetricRegistry.name("launchedDrivers"), new Gauge[Int] {
    override def getValue: Int = scheduler.getLaunchedDriversSize
  })

  metricRegistry.register(MetricRegistry.name("retryDrivers"), new Gauge[Int] {
    override def getValue: Int = scheduler.getPendingRetryDriversSize
  })
}
Example 50
Source File: MetricsReporter.scala From multi-tenancy-spark with Apache License 2.0 | 5 votes |
package org.apache.spark.sql.execution.streaming

import java.{util => ju}

import scala.collection.mutable

import com.codahale.metrics.{Gauge, MetricRegistry}

import org.apache.spark.internal.Logging
import org.apache.spark.metrics.source.{Source => CodahaleSource}
import org.apache.spark.util.Clock

class MetricsReporter(
    stream: StreamExecution,
    override val sourceName: String) extends CodahaleSource with Logging {

  override val metricRegistry: MetricRegistry = new MetricRegistry

  // Metric names should not have . in them, so that all the metrics of a query are identified
  // together in Ganglia as a single metric group
  registerGauge("inputRate-total", () => stream.lastProgress.inputRowsPerSecond)
  registerGauge("processingRate-total", () => stream.lastProgress.processedRowsPerSecond)
  registerGauge("latency", () => stream.lastProgress.durationMs.get("triggerExecution").longValue())

  private def registerGauge[T](name: String, f: () => T)(implicit num: Numeric[T]): Unit = {
    synchronized {
      metricRegistry.register(name, new Gauge[T] {
        override def getValue: T = f()
      })
    }
  }
}
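The Numeric context bound on registerGauge restricts callers to numeric gauge values at compile time. A standalone sketch of the same pattern (names are illustrative):

import com.codahale.metrics.{Gauge, MetricRegistry}

val registry = new MetricRegistry()

def registerGauge[T](name: String, f: () => T)(implicit num: Numeric[T]): Unit =
  registry.register(name, new Gauge[T] {
    override def getValue: T = f()
  })

registerGauge("queue.depth", () => 10)   // fine: Numeric[Int] exists
registerGauge("latency.ms", () => 12.5)  // fine: Numeric[Double] exists
// registerGauge("state", () => "busy")  // rejected: no Numeric[String]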
Example 51
Source File: MasterSource.scala From multi-tenancy-spark with Apache License 2.0 | 5 votes |
package org.apache.spark.deploy.master

import com.codahale.metrics.{Gauge, MetricRegistry}

import org.apache.spark.metrics.source.Source

private[spark] class MasterSource(val master: Master) extends Source {
  override val metricRegistry = new MetricRegistry()
  override val sourceName = "master"

  // Gauge for worker numbers in cluster
  metricRegistry.register(MetricRegistry.name("workers"), new Gauge[Int] {
    override def getValue: Int = master.workers.size
  })

  // Gauge for alive worker numbers in cluster
  metricRegistry.register(MetricRegistry.name("aliveWorkers"), new Gauge[Int] {
    override def getValue: Int = master.workers.count(_.state == WorkerState.ALIVE)
  })

  // Gauge for application numbers in cluster
  metricRegistry.register(MetricRegistry.name("apps"), new Gauge[Int] {
    override def getValue: Int = master.apps.size
  })

  // Gauge for waiting application numbers in cluster
  metricRegistry.register(MetricRegistry.name("waitingApps"), new Gauge[Int] {
    override def getValue: Int = master.apps.count(_.state == ApplicationState.WAITING)
  })
}
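To see what any of these Sources actually expose, the registry can be attached to a reporter. A minimal sketch using only the Dropwizard API; the gauge here is a stand-in for the ones a Source would register:

import java.util.concurrent.TimeUnit
import com.codahale.metrics.{ConsoleReporter, Gauge, MetricRegistry}

val registry = new MetricRegistry()
registry.register("workers", new Gauge[Int] {
  override def getValue: Int = 42 // placeholder value for illustration
})

val reporter = ConsoleReporter.forRegistry(registry)
  .convertRatesTo(TimeUnit.SECONDS)
  .convertDurationsTo(TimeUnit.MILLISECONDS)
  .build()
reporter.start(10, TimeUnit.SECONDS) // print all registered metrics every 10 seconds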