breeze.optimize.DiffFunction Scala Examples
The following examples show how to use breeze.optimize.DiffFunction. DiffFunction[T] represents a differentiable objective: its calculate method returns both the function value and the gradient at a point, and breeze's first-order optimizers such as LBFGS consume it directly.
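Before the project examples, here is a minimal, self-contained sketch of the contract (the quadratic objective and all names below are illustrative, not taken from any of the projects): minimizing f(x) = ||x - 3||^2 with LBFGS.

    import breeze.linalg.DenseVector
    import breeze.optimize.{DiffFunction, LBFGS}

    object DiffFunctionSketch {
      def main(args: Array[String]): Unit = {
        // f(x) = ||x - 3||^2 with gradient 2 * (x - 3); the minimum is at x = (3, ..., 3).
        val f = new DiffFunction[DenseVector[Double]] {
          override def calculate(x: DenseVector[Double]): (Double, DenseVector[Double]) = {
            val diff = x - 3.0
            (diff dot diff, diff * 2.0)
          }
        }
        val lbfgs = new LBFGS[DenseVector[Double]](maxIter = 100, m = 7)
        println(lbfgs.minimize(f, DenseVector.zeros[Double](5))) // ~ DenseVector(3.0, ..., 3.0)
      }
    }

Each project example below implements this same calculate contract; they differ only in how the loss and gradient are computed.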
Example 1
Source File: DifferentiableRegularization.scala, from Spark-2.3.1 (Apache License 2.0)
    package org.apache.spark.ml.optim.loss

    import breeze.optimize.DiffFunction
    import org.apache.spark.ml.linalg._

    // Base trait from the same source file (collapsed in the original listing):
    // a DiffFunction over the coefficients that also exposes the penalty strength.
    private[ml] trait DifferentiableRegularization[T] extends DiffFunction[T] {
      def regParam: Double
    }

    private[ml] class L2Regularization(
        override val regParam: Double,
        shouldApply: Int => Boolean,
        applyFeaturesStd: Option[Int => Double]) extends DifferentiableRegularization[Vector] {

      override def calculate(coefficients: Vector): (Double, Vector) = {
        coefficients match {
          case dv: DenseVector =>
            var sum = 0.0
            val gradient = new Array[Double](dv.size)
            dv.values.indices.filter(shouldApply).foreach { j =>
              val coef = coefficients(j)
              applyFeaturesStd match {
                case Some(getStd) =>
                  // If `standardization` is false, we still standardize the data
                  // to improve the rate of convergence; as a result, we have to
                  // perform this reverse standardization by penalizing each component
                  // differently to get effectively the same objective function when
                  // the training dataset is not standardized.
                  val std = getStd(j)
                  if (std != 0.0) {
                    val temp = coef / (std * std)
                    sum += coef * temp
                    gradient(j) = regParam * temp
                  }
                case None =>
                  // If `standardization` is true, compute L2 regularization normally.
                  sum += coef * coef
                  gradient(j) = coef * regParam
              }
            }
            (0.5 * sum * regParam, Vectors.dense(gradient))
          case _: SparseVector =>
            throw new IllegalArgumentException("Sparse coefficients are not currently supported.")
        }
      }
    }
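For intuition, a hedged usage sketch follows (the inputs are made up, and the class is private[ml], so this only compiles from within the org.apache.spark.ml package). With applyFeaturesStd = None the penalty reduces to the plain ridge term 0.5 * regParam * ||w||^2 with gradient regParam * w:

    import org.apache.spark.ml.linalg.Vectors

    val l2 = new L2Regularization(
      regParam = 0.1,
      shouldApply = _ => true,     // penalize every coefficient
      applyFeaturesStd = None)     // data is already standardized

    val (loss, grad) = l2.calculate(Vectors.dense(1.0, -2.0))
    // loss = 0.5 * 0.1 * (1.0 + 4.0) = 0.25
    // grad = [0.1, -0.2]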
Example 2
Source File: RDDLossFunction.scala, from Spark-2.3.1 (Apache License 2.0)
    package org.apache.spark.ml.optim.loss

    import scala.reflect.ClassTag

    import breeze.linalg.{DenseVector => BDV}
    import breeze.optimize.DiffFunction

    import org.apache.spark.broadcast.Broadcast
    import org.apache.spark.ml.linalg.{BLAS, Vector, Vectors}
    import org.apache.spark.ml.optim.aggregator.DifferentiableLossAggregator
    import org.apache.spark.rdd.RDD

    private[ml] class RDDLossFunction[
        T: ClassTag,
        Agg <: DifferentiableLossAggregator[T, Agg]: ClassTag](
        instances: RDD[T],
        getAggregator: Broadcast[Vector] => Agg,
        regularization: Option[DifferentiableRegularization[Vector]],
        aggregationDepth: Int = 2)
      extends DiffFunction[BDV[Double]] {

      override def calculate(coefficients: BDV[Double]): (Double, BDV[Double]) = {
        val bcCoefficients = instances.context.broadcast(Vectors.fromBreeze(coefficients))
        val thisAgg = getAggregator(bcCoefficients)
        val seqOp = (agg: Agg, x: T) => agg.add(x)
        val combOp = (agg1: Agg, agg2: Agg) => agg1.merge(agg2)
        // Compute the data loss and gradient distributedly, then fold in the
        // regularization term on the driver.
        val newAgg = instances.treeAggregate(thisAgg)(seqOp, combOp, aggregationDepth)
        val gradient = newAgg.gradient
        val regLoss = regularization.map { regFun =>
          val (regLoss, regGradient) = regFun.calculate(Vectors.fromBreeze(coefficients))
          BLAS.axpy(1.0, regGradient, gradient)
          regLoss
        }.getOrElse(0.0)
        bcCoefficients.destroy(blocking = false)
        (newAgg.loss + regLoss, gradient.asBreeze.toDenseVector)
      }
    }
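RDDLossFunction distributes only the data-dependent part of the objective via treeAggregate and adds the regularizer once on the driver. Below is a minimal, Spark-free sketch of the same aggregate-then-regularize pattern, with the RDD replaced by a local Seq and a fold standing in for seqOp/combOp (all names here are illustrative, not part of Spark ML):

    import breeze.linalg.{DenseVector => BDV}
    import breeze.optimize.{DiffFunction, LBFGS}

    final case class Instance(label: Double, features: BDV[Double])

    class LocalLossFunction(data: Seq[Instance], regParam: Double)
      extends DiffFunction[BDV[Double]] {

      override def calculate(coef: BDV[Double]): (Double, BDV[Double]) = {
        // seqOp/combOp analogue: accumulate squared-error loss and its gradient.
        val zero = (0.0, BDV.zeros[Double](coef.length))
        val (dataLoss, dataGrad) = data.foldLeft(zero) { case ((l, g), inst) =>
          val residual = (coef dot inst.features) - inst.label
          (l + 0.5 * residual * residual, g + inst.features * residual)
        }
        // Add the L2 penalty on the "driver", as RDDLossFunction does.
        (dataLoss + 0.5 * regParam * (coef dot coef), dataGrad + coef * regParam)
      }
    }

    // Hypothetical usage, given some `data: Seq[Instance]` of dimension `dim`:
    // new LBFGS[BDV[Double]]().minimize(new LocalLossFunction(data, 0.1), BDV.zeros[Double](dim))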
Example 3
Source File: LinearModel.scala, from doddle-model (Apache License 2.0)
    package io.picnicml.doddlemodel.linear.typeclasses

    import breeze.linalg.{DenseMatrix, DenseVector}
    import breeze.optimize.{DiffFunction, LBFGS}
    import io.picnicml.doddlemodel.data.{Features, RealVector, Target}
    import io.picnicml.doddlemodel.typeclasses.Predictor

    trait LinearModel[A] { this: Predictor[A] =>

      // Abstract members used below (collapsed in the original listing):
      // the fitted parameters, plus stateless prediction, loss, and loss gradient,
      // all implemented by concrete models.
      protected def w(model: A): Option[RealVector]
      protected def predictStateless(model: A, w: RealVector, x: Features): Target
      protected[linear] def lossStateless(model: A, w: RealVector, x: Features, y: Target): Float
      protected[linear] def lossGradStateless(model: A, w: RealVector, x: Features, y: Target): RealVector

      override def isFitted(model: A): Boolean = w(model).isDefined

      override def predictSafe(model: A, x: Features): Target =
        predictStateless(model, w(model).get, xWithBiasTerm(x))

      protected def maximumLikelihood(model: A, x: Features, y: Target, init: RealVector): RealVector = {
        // Wrap the model's loss and gradient in a breeze DiffFunction and minimize it.
        val diffFunction = new DiffFunction[RealVector] {
          override def calculate(w: RealVector): (Double, RealVector) =
            (lossStateless(model, w, x, y).toDouble, lossGradStateless(model, w, x, y))
        }
        val lbfgs = new LBFGS[DenseVector[Float]](tolerance = 1e-4)
        lbfgs.minimize(diffFunction, init)
      }

      protected def xWithBiasTerm(x: Features): Features =
        DenseMatrix.horzcat(DenseMatrix.ones[Float](x.rows, 1), x)
    }
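Concrete models plug into maximumLikelihood by supplying lossStateless and lossGradStateless. As a hedged sketch (not doddle-model's actual implementation), an ordinary-least-squares pair over the trait's Float-based types could look like this:

    import breeze.linalg.{DenseMatrix, DenseVector}

    object OlsLossSketch {
      // Mean squared-error loss: 0.5 * ||Xw - y||^2 / n.
      def loss(w: DenseVector[Float], x: DenseMatrix[Float], y: DenseVector[Float]): Float = {
        val residuals = x * w - y
        0.5f * (residuals dot residuals) / x.rows.toFloat
      }

      // Its gradient with respect to w: X^T (Xw - y) / n.
      def lossGrad(w: DenseVector[Float], x: DenseMatrix[Float], y: DenseVector[Float]): DenseVector[Float] =
        (x.t * (x * w - y)) / x.rows.toFloat
    }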