org.apache.spark.executor.OutputMetrics Scala Examples
The following examples show how to use org.apache.spark.executor.OutputMetrics, the metrics object Spark uses to track the number of bytes and records a task writes to external storage.
Example 1
Source File: RiakWriterTaskCompletionListener.scala, from spark-riak-connector (Apache License 2.0)
package org.apache.spark.riak

import org.apache.spark.TaskContext
import org.apache.spark.executor.{DataWriteMethod, OutputMetrics}
import org.apache.spark.util.TaskCompletionListener

// Reports how many records a Riak write task produced by attaching an
// OutputMetrics instance to the task's TaskMetrics when the task completes.
class RiakWriterTaskCompletionListener(recordsWritten: Long) extends TaskCompletionListener {

  override def onTaskCompletion(context: TaskContext): Unit = {
    val metrics = OutputMetrics(DataWriteMethod.Hadoop)
    metrics.setRecordsWritten(recordsWritten)
    context.taskMetrics().outputMetrics = Some(metrics)
  }
}

object RiakWriterTaskCompletionListener {
  def apply(recordsWritten: Long): RiakWriterTaskCompletionListener =
    new RiakWriterTaskCompletionListener(recordsWritten)
}
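The listener only takes effect once it is registered on a running task. Here is a minimal sketch of how that registration might look inside a per-partition write; the object name, writePartition method, and record counting are hypothetical, and the Option-valued outputMetrics assignment above assumes the Spark 1.x metrics API:

package com.example  // hypothetical package

import org.apache.spark.TaskContext
import org.apache.spark.riak.RiakWriterTaskCompletionListener

object RiakWriteTask {
  // Hypothetical per-partition writer: count the records as they are
  // written, then register the listener so the count is folded into the
  // task's output metrics when the task finishes.
  def writePartition(records: Iterator[String]): Unit = {
    var written = 0L
    records.foreach { record =>
      // ... write the record to Riak (elided) ...
      written += 1
    }
    TaskContext.get().addTaskCompletionListener(
      RiakWriterTaskCompletionListener(written))
  }
}

Registering at task start rather than after the write works too, as long as the final count is available when onTaskCompletion fires.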
Example 2
Source File: ExposedMetrics.scala, from hail (MIT License)
package org.apache.spark

import org.apache.spark.executor.{InputMetrics, OutputMetrics}

// Lives in the org.apache.spark package so that it can call the
// private[spark] increment/set methods on InputMetrics and OutputMetrics
// from code outside Spark itself.
object ExposedMetrics {
  def incrementRecord(metrics: InputMetrics): Unit = {
    metrics.incRecordsRead(1)
  }

  def incrementBytes(metrics: InputMetrics, nBytes: Long): Unit = {
    metrics.incBytesRead(nBytes)
  }

  def setBytes(metrics: OutputMetrics, nBytes: Long): Unit = {
    metrics.setBytesWritten(nBytes)
  }

  def setRecords(metrics: OutputMetrics, nRecords: Long): Unit = {
    metrics.setRecordsWritten(nRecords)
  }
}
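Because the shim sits inside Spark's own package, callers elsewhere can update a task's metrics through it. A minimal usage sketch, assuming a live TaskContext and the Spark 2.x TaskMetrics layout (where inputMetrics and outputMetrics are plain fields rather than Options); the object name, methods, and counts are hypothetical:

package com.example  // hypothetical package

import org.apache.spark.{ExposedMetrics, TaskContext}

object MetricsUpdateExample {
  // Hypothetical reader hook: credit one record and its size to the
  // task's input metrics each time a record is consumed.
  def recordRead(nBytes: Long): Unit = {
    val in = TaskContext.get().taskMetrics().inputMetrics
    ExposedMetrics.incrementRecord(in)
    ExposedMetrics.incrementBytes(in, nBytes)
  }

  // Hypothetical writer epilogue: record the totals this task wrote.
  def writeFinished(totalBytes: Long, totalRecords: Long): Unit = {
    val out = TaskContext.get().taskMetrics().outputMetrics
    ExposedMetrics.setBytes(out, totalBytes)
    ExposedMetrics.setRecords(out, totalRecords)
  }
}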