org.apache.spark.mllib.stat.MultivariateStatisticalSummary Scala Examples
The following examples show how to use org.apache.spark.mllib.stat.MultivariateStatisticalSummary.
You can go to the original project or source file by following the links above each example.
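Before the project examples, here is a minimal, self-contained sketch of the API (our illustration, not taken from any of the projects below). MultivariateOnlineSummarizer implements MultivariateStatisticalSummary, so the same column statistics can be computed locally, without a SparkContext:

import org.apache.spark.mllib.linalg.Vectors
import org.apache.spark.mllib.stat.{MultivariateOnlineSummarizer, MultivariateStatisticalSummary}

// Feed observations one at a time; the statistics are updated incrementally.
val summarizer = new MultivariateOnlineSummarizer()
summarizer.add(Vectors.dense(1.0, 10.0, 100.0))
summarizer.add(Vectors.dense(2.0, 20.0, 200.0))
summarizer.add(Vectors.dense(3.0, 30.0, 300.0))

val summary: MultivariateStatisticalSummary = summarizer
println(summary.mean)     // [2.0,20.0,200.0]
println(summary.variance) // [1.0,100.0,10000.0] (sample variance)
println(summary.count)    // 3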
Example 1
Source File: SummaryStatisticsExample.scala From drizzle-spark with Apache License 2.0
// scalastyle:off println
package org.apache.spark.examples.mllib

import org.apache.spark.{SparkConf, SparkContext}
// $example on$
import org.apache.spark.mllib.linalg.Vectors
import org.apache.spark.mllib.stat.{MultivariateStatisticalSummary, Statistics}
// $example off$

object SummaryStatisticsExample {

  def main(args: Array[String]): Unit = {

    val conf = new SparkConf().setAppName("SummaryStatisticsExample")
    val sc = new SparkContext(conf)

    // $example on$
    val observations = sc.parallelize(
      Seq(
        Vectors.dense(1.0, 10.0, 100.0),
        Vectors.dense(2.0, 20.0, 200.0),
        Vectors.dense(3.0, 30.0, 300.0)
      )
    )

    // Compute column summary statistics.
    val summary: MultivariateStatisticalSummary = Statistics.colStats(observations)
    println(summary.mean)        // a dense vector containing the mean value for each column
    println(summary.variance)    // column-wise variance
    println(summary.numNonzeros) // number of nonzeros in each column
    // $example off$

    sc.stop()
  }
}
// scalastyle:on println
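Mean, variance and numNonzeros are not the whole trait; a short extension of the print block above (our addition; all of these methods exist on MultivariateStatisticalSummary):

println(summary.max)    // column-wise maxima
println(summary.min)    // column-wise minima
println(summary.count)  // number of rows (observations)
println(summary.normL1) // column-wise L1 norm
println(summary.normL2) // column-wise Euclidean norm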
Example 2
Source File: BugDemonstrationTest.scala From spark-tsne with Apache License 2.0
package com.github.saurfang.spark.tsne

import org.apache.spark.mllib.linalg.{Vector, Vectors}
import org.apache.spark.mllib.stat.{MultivariateStatisticalSummary, Statistics}
import org.apache.spark.sql.SparkSession
import org.scalatest.{BeforeAndAfterAll, FunSuite, Matchers}

class BugDemonstrationTest extends FunSuite with Matchers with BeforeAndAfterAll {
  private var sparkSession: SparkSession = _

  override def beforeAll(): Unit = {
    super.beforeAll()
    sparkSession = SparkSession.builder().appName("BugTests").master("local[2]").getOrCreate()
  }

  override def afterAll(): Unit = {
    super.afterAll()
    sparkSession.stop()
  }

  test("This demonstrates a bug was fixed in tsne-spark 2.1") {
    val sc = sparkSession.sparkContext

    val observations = sc.parallelize(
      Seq(
        Vectors.dense(1.0, 10.0, 100.0),
        Vectors.dense(2.0, 20.0, 200.0),
        Vectors.dense(3.0, 30.0, 300.0)
      )
    )

    // Compute column summary statistics.
    val summary: MultivariateStatisticalSummary = Statistics.colStats(observations)
    val expectedMean = Vectors.dense(2.0, 20.0, 200.0)
    val resultMean = summary.mean
    assertEqualEnough(resultMean, expectedMean)
    val expectedVariance = Vectors.dense(1.0, 100.0, 10000.0)
    assertEqualEnough(summary.variance, expectedVariance)
    val expectedNumNonZeros = Vectors.dense(3.0, 3.0, 3.0)
    assertEqualEnough(summary.numNonzeros, expectedNumNonZeros)
  }

  private def assertEqualEnough(sample: Vector, expected: Vector): Unit = {
    expected.toArray.zipWithIndex.foreach { case (d: Double, i: Int) =>
      sample(i) should be(d +- 1E-12)
    }
  }
}
Example 3
Source File: SummaryStatisticsExample.scala From sparkoscope with Apache License 2.0
// scalastyle:off println
package org.apache.spark.examples.mllib

import org.apache.spark.{SparkConf, SparkContext}
// $example on$
import org.apache.spark.mllib.linalg.Vectors
import org.apache.spark.mllib.stat.{MultivariateStatisticalSummary, Statistics}
// $example off$

object SummaryStatisticsExample {

  def main(args: Array[String]): Unit = {

    val conf = new SparkConf().setAppName("SummaryStatisticsExample")
    val sc = new SparkContext(conf)

    // $example on$
    val observations = sc.parallelize(
      Seq(
        Vectors.dense(1.0, 10.0, 100.0),
        Vectors.dense(2.0, 20.0, 200.0),
        Vectors.dense(3.0, 30.0, 300.0)
      )
    )

    // Compute column summary statistics.
    val summary: MultivariateStatisticalSummary = Statistics.colStats(observations)
    println(summary.mean)        // a dense vector containing the mean value for each column
    println(summary.variance)    // column-wise variance
    println(summary.numNonzeros) // number of nonzeros in each column
    // $example off$

    sc.stop()
  }
}
// scalastyle:on println
Example 4
Source File: TimeSeriesRegressionMetrics.scala From uberdata with Apache License 2.0
package org.apache.spark.ml.evaluation

import org.apache.spark.ml.linalg.Vectors
import org.apache.spark.mllib.stat.{MultivariateOnlineSummarizer, MultivariateStatisticalSummary}
import org.apache.spark.rdd.RDD

import scala.reflect.ClassTag

class TimeSeriesRegressionMetrics[T](
    idPredictionsAndObservations: RDD[(T, Int, Array[(Double, Double)])],
    isLargerBetter: Boolean)(implicit kt: ClassTag[T], ord: Ordering[T] = null) {

  // For every (id, model index) pair, summarize the observations and the residuals
  // (observation - prediction) with a MultivariateOnlineSummarizer.
  private lazy val summaryRDD: RDD[(T, Int, Array[(Double, Double)], MultivariateStatisticalSummary)] =
    idPredictionsAndObservations.map { case (id, modelIndex, array) =>
      (id, modelIndex, array,
        array.map { case (observation, prediction) =>
          Vectors.dense(observation, observation - prediction)
        }.aggregate(new MultivariateOnlineSummarizer())(
          (summary, current) => summary.add(org.apache.spark.mllib.linalg.Vectors.fromML(current)),
          (sum1, sum2) => sum1.merge(sum2)))
    }

  private lazy val SSerr = summaryRDD.map { case (id, modelIndex, values, summary) =>
    ((id, modelIndex), (math.pow(summary.normL2(1), 2), summary))
  }

  private lazy val SStot = summaryRDD.map { case (id, modelIndex, values, summary) =>
    ((id, modelIndex), summary.variance(0) * (summary.count - 1))
  }

  private lazy val SSreg = summaryRDD.map { case (id, modelIndex, values, summary) =>
    val yMean = summary.mean(0)
    (id, modelIndex,
      values.map { case (prediction, observation) => math.pow(prediction - yMean, 2) }.sum,
      summary)
  }

  def explainedVariance: RDD[(T, (Int, Double))] = SSreg.map {
    case (id, modelIndex, regValue, summary) => (id, (modelIndex, regValue / summary.count))
  }

  def meanAbsoluteError: RDD[(T, (Int, Double))] = summaryRDD.map {
    case (id, modelIndex, _, summary) => (id, (modelIndex, summary.normL1(1) / summary.count))
  }

  def meanSquaredError: RDD[(T, (Int, Double))] = SSerr.map {
    case ((id, modelIndex), (err, summary)) => (id, (modelIndex, err / summary.count))
  }

  def rootMeanSquaredError: RDD[(T, (Int, Double))] = meanSquaredError.map {
    case (id, (modelIndex, err)) => (id, (modelIndex, math.sqrt(err)))
  }

  def r2: RDD[(T, (Int, Double))] = SSerr.join(SStot).map {
    case ((id, modelIndex), ((sSerr, _), sStot)) =>
      (id, (modelIndex, 1 - calc(sSerr / sStot)))
  }

  // TODO: rework this error handling.
  private def calc(f: => Double) =
    try {
      f
    } catch {
      case e: Exception =>
        e.printStackTrace()
        if (isLargerBetter) 0d else Double.MaxValue
    }
}
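A hypothetical usage sketch for this class (the data, series ids and the sc SparkContext below are our assumptions, not taken from uberdata): compare two models' forecasts per series id and print each RMSE:

val perSeries = sc.parallelize(Seq(
  // (series id, model index, Array[(observation, prediction)])
  ("series-a", 0, Array((1.0, 1.1), (2.0, 1.9), (3.0, 3.2))),
  ("series-a", 1, Array((1.0, 0.7), (2.0, 2.4), (3.0, 2.8)))
))
val metrics = new TimeSeriesRegressionMetrics(perSeries, isLargerBetter = false)
metrics.rootMeanSquaredError.collect().foreach {
  case (id, (modelIndex, rmse)) => println(s"$id model $modelIndex RMSE = $rmse")
}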
Example 5
Source File: TimeSeriesSmallModelRegressionMetrics.scala From uberdata with Apache License 2.0
package org.apache.spark.ml.evaluation

import org.apache.spark.ml.linalg.Vectors
import org.apache.spark.mllib.stat.{MultivariateOnlineSummarizer, MultivariateStatisticalSummary}

class TimeSeriesSmallModelRegressionMetrics(
    idPredictionsAndObservations: Array[(Double, Double)]) {

  private lazy val summary: MultivariateStatisticalSummary =
    idPredictionsAndObservations.map { case (observation, prediction) =>
      Vectors.dense(observation, observation - prediction)
    }.aggregate(new MultivariateOnlineSummarizer())(
      (summary, current) => summary.add(org.apache.spark.mllib.linalg.Vectors.fromML(current)),
      (sum1, sum2) => sum1.merge(sum2))

  private lazy val SSerr = math.pow(summary.normL2(1), 2)
  private lazy val SStot = summary.variance(0) * (summary.count - 1)
  private lazy val SSreg = {
    val yMean = summary.mean(0)
    idPredictionsAndObservations.map { case (prediction, observation) =>
      math.pow(prediction - yMean, 2)
    }.sum
  }

  def explainedVariance = SSreg / summary.count

  def meanAbsoluteError = summary.normL1(1) / summary.count

  def meanSquaredError = SSerr / summary.count

  def rootMeanSquaredPercentageError = math.sqrt(
    idPredictionsAndObservations.map { case (observation, prediction) =>
      if (observation == 0) 0 else math.pow((observation - prediction) / observation, 2)
    }.sum / summary.count)

  def rootMeanSquaredError = math.sqrt(meanSquaredError)

  def r2 = 1 - (SSerr / SStot)
}
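A quick driver-side usage sketch (our illustration; the pairs are (observation, prediction), matching the destructuring in the summary above):

val pairs = Array((1.0, 1.1), (2.0, 1.9), (3.0, 3.2))
val metrics = new TimeSeriesSmallModelRegressionMetrics(pairs)
println(s"MAE = ${metrics.meanAbsoluteError}, RMSE = ${metrics.rootMeanSquaredError}, r2 = ${metrics.r2}")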
Example 6
Source File: SummaryStatisticsExample.scala From multi-tenancy-spark with Apache License 2.0
// scalastyle:off println
package org.apache.spark.examples.mllib

import org.apache.spark.{SparkConf, SparkContext}
// $example on$
import org.apache.spark.mllib.linalg.Vectors
import org.apache.spark.mllib.stat.{MultivariateStatisticalSummary, Statistics}
// $example off$

object SummaryStatisticsExample {

  def main(args: Array[String]): Unit = {

    val conf = new SparkConf().setAppName("SummaryStatisticsExample")
    val sc = new SparkContext(conf)

    // $example on$
    val observations = sc.parallelize(
      Seq(
        Vectors.dense(1.0, 10.0, 100.0),
        Vectors.dense(2.0, 20.0, 200.0),
        Vectors.dense(3.0, 30.0, 300.0)
      )
    )

    // Compute column summary statistics.
    val summary: MultivariateStatisticalSummary = Statistics.colStats(observations)
    println(summary.mean)        // a dense vector containing the mean value for each column
    println(summary.variance)    // column-wise variance
    println(summary.numNonzeros) // number of nonzeros in each column
    // $example off$

    sc.stop()
  }
}
// scalastyle:on println
Example 7
Source File: ContinuousDistributionBuilderFactory.scala From seahorse with Apache License 2.0
package ai.deepsense.deeplang.doperables.dataframe.report.distribution.continuous

import org.apache.spark.mllib.stat.MultivariateStatisticalSummary
import org.apache.spark.sql.types.StructField

import ai.deepsense.deeplang.doperables.dataframe.report.distribution._
import ai.deepsense.deeplang.utils.SparkTypeConverter._
import ai.deepsense.deeplang.utils.aggregators._

object ContinuousDistributionBuilderFactory {

  def prepareBuilder(
      columnIndex: Int,
      field: StructField,
      multivarStats: MultivariateStatisticalSummary): DistributionBuilder = {
    val columnStats = ColumnStats.fromMultiVarStats(multivarStats, columnIndex)
    // MultivarStats initializes min to Double.MaxValue and max to Double.MinValue.
    // If there is at least one value that is neither null nor NaN, the min/max values
    // are guaranteed to change.
    // TODO: This is a bit hacky. Find a more elegant solution. Possible approaches:
    // - Filter out nulls? Problematic, because we operate on vectors for performance.
    // - Rework the Spark aggregators to return Options?
    val hasOnlyNulls = columnStats.min == Double.MaxValue && columnStats.max == Double.MinValue
    if (!hasOnlyNulls) {
      val histogram = {
        val buckets = BucketsCalculator.calculateBuckets(field.dataType, columnStats)
        HistogramAggregator(buckets, true).mapInput(getColumnAsDouble(columnIndex))
      }
      val missing = CountOccurenceAggregator[Option[Any]](None).mapInput(getOption(columnIndex))
      ContinuousDistributionBuilder(histogram, missing, field, columnStats)
    } else {
      NoDistributionBuilder(field.name, NoDistributionReasons.OnlyNulls)
    }
  }
}
Example 8
Source File: ContinuousDistributionBuilder.scala From seahorse with Apache License 2.0
package ai.deepsense.deeplang.doperables.dataframe.report.distribution.continuous

import org.apache.spark.mllib.stat.MultivariateStatisticalSummary
import org.apache.spark.sql.Row
import org.apache.spark.sql.types._

import ai.deepsense.commons.datetime.DateTimeConverter
import ai.deepsense.commons.utils.DoubleUtils
import ai.deepsense.deeplang.doperables.dataframe.report.distribution.{ColumnStats, DistributionBuilder}
import ai.deepsense.deeplang.utils.aggregators.Aggregator
import ai.deepsense.deeplang.utils.aggregators.AggregatorBatch.BatchedResult
import ai.deepsense.reportlib.model
import ai.deepsense.reportlib.model.{ContinuousDistribution, Distribution}

case class ContinuousDistributionBuilder(
    histogram: Aggregator[Array[Long], Row],
    missing: Aggregator[Long, Row],
    field: StructField,
    columnStats: ColumnStats)
  extends DistributionBuilder {

  def allAggregators: Seq[Aggregator[_, Row]] = Seq(histogram, missing)

  override def build(results: BatchedResult): Distribution = {
    val buckets = BucketsCalculator.calculateBuckets(field.dataType, columnStats)

    val histogramCounts = results.forAggregator(histogram)
    val nullsCount = results.forAggregator(missing)

    val labels = buckets2Labels(buckets, field)

    val stats = model.Statistics(
      double2Label(field)(columnStats.max),
      double2Label(field)(columnStats.min),
      mean2Label(field)(columnStats.mean))

    ContinuousDistribution(
      field.name,
      s"Continuous distribution for ${field.name} column",
      nullsCount,
      labels,
      histogramCounts,
      stats)
  }

  private def buckets2Labels(buckets: Seq[Double], structField: StructField): Seq[String] =
    buckets.map(double2Label(structField))

  def mean2Label(structField: StructField)(d: Double): String = structField.dataType match {
    case ByteType | ShortType | IntegerType | LongType => DoubleUtils.double2String(d)
    case _ => double2Label(structField)(d)
  }

  def double2Label(structField: StructField)(d: Double): String = {
    if (d.isNaN) {
      "NaN"
    } else {
      structField.dataType match {
        case ByteType => d.toByte.toString
        case ShortType => d.toShort.toString
        case IntegerType => d.toInt.toString
        case LongType => d.toLong.toString
        case FloatType | DoubleType | _: DecimalType => DoubleUtils.double2String(d)
        case BooleanType => if (d == 0D) false.toString else true.toString
        case TimestampType | DateType =>
          DateTimeConverter.toString(DateTimeConverter.fromMillis(d.toLong))
      }
    }
  }
}
Example 9
Source File: DistributionCalculator.scala From seahorse with Apache License 2.0
package ai.deepsense.deeplang.doperables.dataframe.report.distribution

import org.apache.spark.mllib.stat.MultivariateStatisticalSummary
import org.apache.spark.sql.DataFrame
import org.apache.spark.sql.types._

import ai.deepsense.deeplang.doperables.dataframe.report.distribution.continuous.ContinuousDistributionBuilderFactory
import ai.deepsense.deeplang.doperables.dataframe.report.distribution.discrete.DiscreteDistributionBuilderFactory
import ai.deepsense.deeplang.utils.aggregators.AggregatorBatch
import ai.deepsense.reportlib.model._

object DistributionCalculator {

  def distributionByColumn(
      sparkDataFrame: DataFrame,
      multivarStats: MultivariateStatisticalSummary): Map[String, Distribution] = {
    val dataFrameEmpty = multivarStats.count == 0
    if (dataFrameEmpty) {
      noDistributionBecauseOfNoData(sparkDataFrame.schema)
    } else {
      distributionForNonEmptyDataFrame(sparkDataFrame, multivarStats)
    }
  }

  private def noDistributionBecauseOfNoData(schema: StructType): Map[String, Distribution] = {
    for (columnName <- schema.fieldNames) yield {
      columnName -> NoDistribution(
        columnName,
        NoDistributionReasons.NoData
      )
    }
  }.toMap

  private def distributionForNonEmptyDataFrame(
      sparkDataFrame: DataFrame,
      multivarStats: MultivariateStatisticalSummary): Map[String, Distribution] = {
    val schema = sparkDataFrame.schema
    val distributionBuilders = for {
      (structField, columnIndex) <- schema.zipWithIndex
    } yield {
      DistributionType.forStructField(structField) match {
        case DistributionType.Discrete =>
          DiscreteDistributionBuilderFactory.prepareBuilder(columnIndex, structField)
        case DistributionType.Continuous =>
          ContinuousDistributionBuilderFactory.prepareBuilder(columnIndex, structField, multivarStats)
        case DistributionType.NotApplicable =>
          NoDistributionBuilder(
            structField.name,
            NoDistributionReasons.NotApplicableForType(structField.dataType))
      }
    }
    val results = {
      val aggregators = distributionBuilders.flatMap(_.allAggregators)
      AggregatorBatch.executeInBatch(sparkDataFrame.rdd, aggregators)
    }
    val distributions = distributionBuilders.map(_.build(results))
    distributions.map(d => d.name -> d).toMap
  }
}
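For context, a hedged sketch of how the multivarStats argument might be produced before calling distributionByColumn (our illustration only; Seahorse's real call site may differ, and this assumes every column is numeric and non-null):

import org.apache.spark.mllib.linalg.Vectors
import org.apache.spark.mllib.stat.Statistics

// Turn every row into a dense vector, then summarize the columns.
val vectors = sparkDataFrame.rdd.map { row =>
  Vectors.dense(row.toSeq.map { case n: java.lang.Number => n.doubleValue() }.toArray)
}
val multivarStats = Statistics.colStats(vectors)
val distributions = DistributionCalculator.distributionByColumn(sparkDataFrame, multivarStats)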
Example 10
Source File: BasicStatistics.scala From spark1.52 with Apache License 2.0
package org.apache.spark.examples.mllib

import org.apache.spark.{SparkConf, SparkContext}
import org.apache.spark.mllib.linalg.{Matrix, Vector, Vectors}
import org.apache.spark.mllib.stat.{MultivariateStatisticalSummary, Statistics}
import org.apache.spark.rdd.RDD

object BasicStatistics {

  def main(args: Array[String]): Unit = {
    val conf = new SparkConf().setAppName("BasicStatistics").setMaster("local")
    val sc = new SparkContext(conf)

    // Sample data; the original file used null placeholders here.
    val seriesX: RDD[Double] = sc.parallelize(Seq(1.0, 2.0, 3.0, 4.0, 5.0)) // a series
    // must have the same number of partitions and cardinality as seriesX
    val seriesY: RDD[Double] = sc.parallelize(Seq(2.0, 4.0, 6.0, 8.0, 10.0))

    // Compute the correlation using Pearson's method. Enter "spearman" for Spearman's method.
    // If a method is not specified, Pearson's method will be used by default.
    val correlation: Double = Statistics.corr(seriesX, seriesY, "pearson")
    println("pearson:" + correlation)

    // Note that each Vector is a row and not a column.
    val data: RDD[Vector] = sc.parallelize(Seq(
      Vectors.dense(1.0, 10.0, 100.0),
      Vectors.dense(2.0, 20.0, 200.0),
      Vectors.dense(3.0, 30.0, 300.0)))

    // Calculate the correlation matrix using Pearson's method.
    // Use "spearman" for Spearman's method.
    // If a method is not specified, Pearson's method will be used by default.
    val correlMatrix: Matrix = Statistics.corr(data, "pearson")
    println("correlMatrix:" + correlMatrix.toString())

    sc.stop()
  }
}
Example 11
Source File: StatisticsDemo.scala From spark1.52 with Apache License 2.0
package org.apache.spark.examples.mllib

import org.apache.spark.SparkConf
import org.apache.spark.SparkContext
import org.apache.spark.mllib.linalg.Vectors
import org.apache.spark.mllib.stat.MultivariateStatisticalSummary
import org.apache.spark.mllib.stat.Statistics

object StatisticsDemo {
  def main(args: Array[String]): Unit = {
    val conf = new SparkConf().setAppName("test").setMaster("local")
    val sc = new SparkContext(conf)

    val rdd1 = sc.parallelize(
      Array(
        Array(1.0, 2.0, 3.0),
        Array(2.0, 3.0, 4.0))).map(f => Vectors.dense(f))

    val mss = Statistics.colStats(rdd1)
    // For example, the mean of the five numbers 1, 2, 3, 4, 5 is 3.
    println("mean: " + mss.mean)
    // The variance is the sum of squared differences from the mean divided by the count;
    // a smaller variance means more stable data, i.e. the values differ less from one another.
    // The sample variance instead divides by (count - 1).
    println("sample variance: " + mss.variance)
    println("number of non-zero entries: " + mss.numNonzeros)
    println("count: " + mss.count)
    println("max: " + mss.max)
    println("min: " + mss.min)
    // Further statistics such as normL1 and normL2 are also available.

    val land1 = Vectors.dense(1000.0, 1856.0)
    val land2 = Vectors.dense(400, 560)
    val c1 = Statistics.chiSqTest(land1, land2)

    sc.stop()
  }
}
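The chi-squared result c1 is computed above but never inspected. A small follow-up (our addition) that could sit right after the chiSqTest call:

// ChiSqTestResult exposes the test statistic, degrees of freedom and p-value.
println("chi-squared statistic: " + c1.statistic)
println("degrees of freedom: " + c1.degreesOfFreedom)
println("p-value: " + c1.pValue)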
Example 12
Source File: SummaryStatisticsExample.scala From Spark-2.3.1 with Apache License 2.0
// scalastyle:off println
package org.apache.spark.examples.mllib

import org.apache.spark.{SparkConf, SparkContext}
// $example on$
import org.apache.spark.mllib.linalg.Vectors
import org.apache.spark.mllib.stat.{MultivariateStatisticalSummary, Statistics}
// $example off$

object SummaryStatisticsExample {

  def main(args: Array[String]): Unit = {

    val conf = new SparkConf().setAppName("SummaryStatisticsExample")
    val sc = new SparkContext(conf)

    // $example on$
    val observations = sc.parallelize(
      Seq(
        Vectors.dense(1.0, 10.0, 100.0),
        Vectors.dense(2.0, 20.0, 200.0),
        Vectors.dense(3.0, 30.0, 300.0)
      )
    )

    // Compute column summary statistics.
    val summary: MultivariateStatisticalSummary = Statistics.colStats(observations)
    println(summary.mean)        // a dense vector containing the mean value for each column
    println(summary.variance)    // column-wise variance
    println(summary.numNonzeros) // number of nonzeros in each column
    // $example off$

    sc.stop()
  }
}
// scalastyle:on println
Example 13
Source File: ContinuousDistributionBuilderFactory.scala From seahorse-workflow-executor with Apache License 2.0
package io.deepsense.deeplang.doperables.dataframe.report.distribution.continuous

import org.apache.spark.mllib.stat.MultivariateStatisticalSummary
import org.apache.spark.sql.types.StructField

import io.deepsense.deeplang.doperables.dataframe.report.distribution._
import io.deepsense.deeplang.utils.SparkTypeConverter._
import io.deepsense.deeplang.utils.aggregators._

object ContinuousDistributionBuilderFactory {

  def prepareBuilder(
      columnIndex: Int,
      field: StructField,
      multivarStats: MultivariateStatisticalSummary): DistributionBuilder = {
    val columnStats = ColumnStats.fromMultiVarStats(multivarStats, columnIndex)
    // MultivarStats initializes min to Double.MaxValue and max to Double.MinValue.
    // If there is at least one value that is neither null nor NaN, the min/max values
    // are guaranteed to change.
    // TODO: This is a bit hacky. Find a more elegant solution. Possible approaches:
    // - Filter out nulls? Problematic, because we operate on vectors for performance.
    // - Rework the Spark aggregators to return Options?
    val hasOnlyNulls = columnStats.min == Double.MaxValue && columnStats.max == Double.MinValue
    if (!hasOnlyNulls) {
      val histogram = {
        val buckets = BucketsCalculator.calculateBuckets(field.dataType, columnStats)
        HistogramAggregator(buckets, true).mapInput(getColumnAsDouble(columnIndex))
      }
      val missing = CountOccurenceAggregator[Option[Any]](None).mapInput(getOption(columnIndex))
      ContinuousDistributionBuilder(histogram, missing, field, columnStats)
    } else {
      NoDistributionBuilder(field.name, NoDistributionReasons.OnlyNulls)
    }
  }
}
Example 14
Source File: ContinuousDistributionBuilder.scala From seahorse-workflow-executor with Apache License 2.0
package io.deepsense.deeplang.doperables.dataframe.report.distribution.continuous

import org.apache.spark.mllib.stat.MultivariateStatisticalSummary
import org.apache.spark.sql.Row
import org.apache.spark.sql.types._

import io.deepsense.commons.datetime.DateTimeConverter
import io.deepsense.commons.utils.DoubleUtils
import io.deepsense.deeplang.doperables.dataframe.report.distribution.{ColumnStats, DistributionBuilder}
import io.deepsense.deeplang.utils.aggregators.Aggregator
import io.deepsense.deeplang.utils.aggregators.AggregatorBatch.BatchedResult
import io.deepsense.reportlib.model
import io.deepsense.reportlib.model.{ContinuousDistribution, Distribution}

case class ContinuousDistributionBuilder(
    histogram: Aggregator[Array[Long], Row],
    missing: Aggregator[Long, Row],
    field: StructField,
    columnStats: ColumnStats)
  extends DistributionBuilder {

  def allAggregators: Seq[Aggregator[_, Row]] = Seq(histogram, missing)

  override def build(results: BatchedResult): Distribution = {
    val buckets = BucketsCalculator.calculateBuckets(field.dataType, columnStats)

    val histogramCounts = results.forAggregator(histogram)
    val nullsCount = results.forAggregator(missing)

    val labels = buckets2Labels(buckets, field)

    val stats = model.Statistics(
      double2Label(field)(columnStats.max),
      double2Label(field)(columnStats.min),
      mean2Label(field)(columnStats.mean))

    ContinuousDistribution(
      field.name,
      s"Continuous distribution for ${field.name} column",
      nullsCount,
      labels,
      histogramCounts,
      stats)
  }

  private def buckets2Labels(buckets: Seq[Double], structField: StructField): Seq[String] =
    buckets.map(double2Label(structField))

  def mean2Label(structField: StructField)(d: Double): String = structField.dataType match {
    case ByteType | ShortType | IntegerType | LongType => DoubleUtils.double2String(d)
    case _ => double2Label(structField)(d)
  }

  def double2Label(structField: StructField)(d: Double): String = {
    if (d.isNaN) {
      "NaN"
    } else {
      structField.dataType match {
        case ByteType => d.toByte.toString
        case ShortType => d.toShort.toString
        case IntegerType => d.toInt.toString
        case LongType => d.toLong.toString
        case FloatType | DoubleType | _: DecimalType => DoubleUtils.double2String(d)
        case BooleanType => if (d == 0D) false.toString else true.toString
        case TimestampType | DateType =>
          DateTimeConverter.toString(DateTimeConverter.fromMillis(d.toLong))
      }
    }
  }
}
Example 15
Source File: DistributionCalculator.scala From seahorse-workflow-executor with Apache License 2.0
package io.deepsense.deeplang.doperables.dataframe.report.distribution

import org.apache.spark.mllib.stat.MultivariateStatisticalSummary
import org.apache.spark.sql.DataFrame
import org.apache.spark.sql.types._

import io.deepsense.deeplang.doperables.dataframe.report.distribution.continuous.ContinuousDistributionBuilderFactory
import io.deepsense.deeplang.doperables.dataframe.report.distribution.discrete.DiscreteDistributionBuilderFactory
import io.deepsense.deeplang.utils.aggregators.AggregatorBatch
import io.deepsense.reportlib.model._

object DistributionCalculator {

  def distributionByColumn(
      sparkDataFrame: DataFrame,
      multivarStats: MultivariateStatisticalSummary): Map[String, Distribution] = {
    val dataFrameEmpty = multivarStats.count == 0
    if (dataFrameEmpty) {
      noDistributionBecauseOfNoData(sparkDataFrame.schema)
    } else {
      distributionForNonEmptyDataFrame(sparkDataFrame, multivarStats)
    }
  }

  private def noDistributionBecauseOfNoData(schema: StructType): Map[String, Distribution] = {
    for (columnName <- schema.fieldNames) yield {
      columnName -> NoDistribution(
        columnName,
        NoDistributionReasons.NoData
      )
    }
  }.toMap

  private def distributionForNonEmptyDataFrame(
      sparkDataFrame: DataFrame,
      multivarStats: MultivariateStatisticalSummary): Map[String, Distribution] = {
    val schema = sparkDataFrame.schema
    val distributionBuilders = for {
      (structField, columnIndex) <- schema.zipWithIndex
    } yield {
      DistributionType.forStructField(structField) match {
        case DistributionType.Discrete =>
          DiscreteDistributionBuilderFactory.prepareBuilder(columnIndex, structField)
        case DistributionType.Continuous =>
          ContinuousDistributionBuilderFactory.prepareBuilder(columnIndex, structField, multivarStats)
        case DistributionType.NotApplicable =>
          NoDistributionBuilder(
            structField.name,
            NoDistributionReasons.NotApplicableForType(structField.dataType))
      }
    }
    val results = {
      val aggregators = distributionBuilders.flatMap(_.allAggregators)
      AggregatorBatch.executeInBatch(sparkDataFrame.rdd, aggregators)
    }
    val distributions = distributionBuilders.map(_.build(results))
    distributions.map(d => d.name -> d).toMap
  }
}