breeze.numerics.exp Scala Examples
The following examples show how to use breeze.numerics.exp.
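Before the project examples, here is a minimal, self-contained sketch of breeze.numerics.exp itself. The object name and printed values are illustrative only; exp is a Breeze UFunc, so the same function applies to scalars, vectors, and matrices, and can also be applied in place.

import breeze.linalg.{DenseMatrix, DenseVector}
import breeze.numerics.exp

// Minimal sketch (illustrative names/values, not from the projects below).
object ExpBasics {
  def main(args: Array[String]): Unit = {
    println(exp(1.0)) // scalar: 2.718281828459045

    val v = DenseVector(0.0, 1.0, 2.0)
    println(exp(v)) // elementwise: DenseVector(1.0, 2.71828..., 7.38905...)

    val m = DenseMatrix((0.0, 1.0), (2.0, 3.0))
    println(exp(m)) // elementwise on matrices too

    exp.inPlace(v) // overwrite v with exp(v); no new vector is allocated
    println(v)
  }
}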
Example 1
Source File: TransformerOperation.scala From BigDL with Apache License 2.0
package com.intel.analytics.bigdl.nn

import breeze.linalg._
import breeze.numerics.exp
import com.intel.analytics.bigdl._
import com.intel.analytics.bigdl.nn.abstractnn.{AbstractModule, TensorModule}
import com.intel.analytics.bigdl.optim.Regularizer
import com.intel.analytics.bigdl.tensor.TensorNumericMath.TensorNumeric
import com.intel.analytics.bigdl.tensor.{Storage, Tensor}
import com.intel.analytics.bigdl.utils.{EngineType, T}

import scala.reflect.ClassTag

private[nn] object TransformerOperation {

  // Assumed value: the scraped snippet dropped this definition. -1e9 is the
  // large negative attention-mask constant used in BigDL's transformer code.
  private val maskValue = -1e9

  def dense[T: ClassTag](
      inputSize: Int,
      outputSize: Int,
      bias: Boolean = true,
      activation: TensorModule[T] = null,
      wRegularizer: Regularizer[T] = null,
      bRegularizer: Regularizer[T] = null,
      name: String = "")(implicit ev: TensorNumeric[T]): Module[T] = {
    val seq = new Sequential[T]()
    val layer = Linear[T](
      inputSize = inputSize,
      outputSize = outputSize,
      withBias = bias,
      wRegularizer = wRegularizer,
      bRegularizer = bRegularizer)
    layer.setInitMethod(weightInitMethod = Xavier, biasInitMethod = Zeros)
    if (name != "") layer.setName(name)
    seq.add(TimeDistributed[T](layer))
    if (activation != null) seq.add(activation)
    seq
  }

  def softMax[T: ClassTag]()(implicit ev: TensorNumeric[T]): Module[T] = {
    val layer = SoftMax[T]()
    val model = Sequential[T]()
    model.add(Transpose[T](Array((2, 4))))
    model.add(layer)
    model.add(Transpose[T](Array((2, 4))))
    model.asInstanceOf[AbstractModule[Tensor[T], Tensor[T], T]]
  }

  def attentionBiasLowerTriangle[T: ClassTag](
      length: Int,
      output: Tensor[T])(implicit ev: TensorNumeric[T]): Tensor[T] = {
    val arr = output.storage().array()
    for (i <- 0 to (length - 1)) {
      var j = length - 1
      while (j > i) {
        // reminder: here not 1
        arr(i * length + j) = ev.fromType(maskValue)
        j -= 1
      }
    }
    output.resize(Array(1, 1, length, length))
  }
}

sealed trait TransformerType

case object Translation extends TransformerType

case object LanguageModel extends TransformerType
Example 2
Source File: LDAOptimizer.scala From mleap with Apache License 2.0
package ml.combust.mleap.core.clustering.optimization

import breeze.linalg.{Vector, sum, DenseMatrix => BDM, DenseVector => BDV, SparseVector => BSV}
import breeze.numerics.{abs, exp}
import breeze.stats.distributions.Gamma
import ml.combust.mleap.core.annotation.SparkCode
import ml.combust.mleap.core.clustering.LDAUtils

// The enclosing object declaration was lost in extraction; the wrapper below
// is restored so the snippet compiles, named after the Spark original this
// file is ported from.
private[clustering] object OnlineLDAOptimizer {

  private[clustering] def variationalTopicInference(
      termCounts: Vector[Double],
      expElogbeta: BDM[Double],
      alpha: breeze.linalg.Vector[Double],
      gammaShape: Double,
      k: Int): (BDV[Double], BDM[Double], List[Int]) = {
    val (ids: List[Int], cts: Array[Double]) = termCounts match {
      case v: BDV[Double] => ((0 until v.size).toList, v.data)
      case v: BSV[Double] => (v.index.toList, v.data)
    }

    // Initialize the variational distribution q(theta|gamma) for the mini-batch
    val gammad: BDV[Double] =
      new Gamma(gammaShape, 1.0 / gammaShape).samplesVector(k)                  // K
    val expElogthetad: BDV[Double] = exp(LDAUtils.dirichletExpectation(gammad)) // K
    val expElogbetad = expElogbeta(ids, ::).toDenseMatrix                       // ids * K

    val phiNorm: BDV[Double] = expElogbetad * expElogthetad :+ 1e-100           // ids
    var meanGammaChange = 1D
    val ctsVector = new BDV[Double](cts)                                        // ids

    // Iterate between gamma and phi until convergence
    while (meanGammaChange > 1e-3) {
      val lastgamma = gammad.copy
      //        K                  K * ids               ids
      gammad := (expElogthetad :* (expElogbetad.t * (ctsVector :/ phiNorm))) :+ alpha
      expElogthetad := exp(LDAUtils.dirichletExpectation(gammad))
      // TODO: Keep more values in log space, and only exponentiate when needed.
      phiNorm := expElogbetad * expElogthetad :+ 1e-100
      meanGammaChange = sum(abs(gammad - lastgamma)) / k
    }

    val sstatsd = expElogthetad.asDenseMatrix.t * (ctsVector :/ phiNorm).asDenseMatrix
    (gammad, sstatsd, ids)
  }
}
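The snippet above exponentiates LDAUtils.dirichletExpectation, whose body is not shown. For reference, the standard formula it computes (as in the Spark code this file is ported from) is E[log θ_k] = ψ(α_k) − ψ(Σ_j α_j); a hedged sketch using Breeze's digamma:

import breeze.linalg.{sum, DenseVector}
import breeze.numerics.{digamma, exp}

// Sketch of the Dirichlet expectation exponentiated above; this is the
// textbook form, standing in for LDAUtils.dirichletExpectation.
def dirichletExpectation(alpha: DenseVector[Double]): DenseVector[Double] =
  digamma(alpha) - digamma(sum(alpha))

val alpha = DenseVector(0.5, 1.0, 2.0)
println(exp(dirichletExpectation(alpha))) // the expElogtheta term used above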
Example 3
Source File: ARDRBFKernel.scala From spark-gp with Apache License 2.0
package org.apache.spark.ml.commons.kernel

import breeze.linalg.{norm, DenseMatrix => BDM, DenseVector => BDV, Vector => BV}
import breeze.numerics.{exp, inf}
import org.apache.spark.ml.linalg.Vector

class ARDRBFKernel(private var beta: BDV[Double],
                   private val lower: BDV[Double],
                   private val upper: BDV[Double])
  extends TrainDatasetBearingKernel with NoiselessKernel with SameOnDiagonalKernel {

  def this(beta: BDV[Double]) = this(beta, beta * 0d, beta * inf)

  def this(p: Int, beta: Double = 1, lower: Double = 0, upper: Double = inf) =
    this(BDV.zeros[Double](p) + beta,
      BDV.zeros[Double](p) + lower,
      BDV.zeros[Double](p) + upper)

  override def setHyperparameters(value: BDV[Double]): ARDRBFKernel.this.type = {
    beta = value
    this
  }

  override def getHyperparameters: BDV[Double] = beta

  override def numberOfHyperparameters: Int = beta.length

  override def hyperparameterBoundaries: (BDV[Double], BDV[Double]) = (lower, upper)

  private def kernelElement(a: BV[Double], b: BV[Double]): Double = {
    val weightedDistance = norm((a - b) *:* beta)
    exp(-weightedDistance * weightedDistance)
  }

  override def trainingKernel(): BDM[Double] = {
    val train = getTrainingVectors
    val result = BDM.zeros[Double](train.length, train.length)

    for (i <- train.indices; j <- 0 to i) {
      val k = kernelElement(train(i).asBreeze, train(j).asBreeze)
      result(i, j) = k
      result(j, i) = k
    }

    result
  }

  override def trainingKernelAndDerivative(): (BDM[Double], Array[BDM[Double]]) = {
    val train = getTrainingVectors
    val K = trainingKernel()
    val minus2Kernel = -2d * K
    val result = Array.fill[BDM[Double]](beta.length)(BDM.zeros[Double](train.length, train.length))

    for (i <- train.indices; j <- 0 to i) {
      val diff = train(i).asBreeze - train(j).asBreeze
      diff :*= diff
      diff :*= beta
      val betaXi_Xj = diff
      for (k <- 0 until beta.length) {
        result(k)(i, j) = betaXi_Xj(k)
        result(k)(j, i) = betaXi_Xj(k)
      }
    }

    (K, result.map(derivative => derivative *:* minus2Kernel))
  }

  override def crossKernel(test: Array[Vector]): BDM[Double] = {
    val train = getTrainingVectors
    val result = BDM.zeros[Double](test.length, train.length)

    for (testIndx <- test.indices; trainIndex <- train.indices)
      result(testIndx, trainIndex) = kernelElement(train(trainIndex).asBreeze, test(testIndx).asBreeze)

    result
  }

  override def selfKernel(test: Vector): Double = 1d

  override def toString = "ARDRBFKernel(beta=" + BDV2String(beta) + ")"

  private def BDV2String(v: BDV[Double]) =
    v.valuesIterator.map(e => f"$e%1.1e").mkString("[", ", ", "]")
}
Example 4
Source File: RBFKernel.scala From spark-gp with Apache License 2.0
package org.apache.spark.ml.commons.kernel

import breeze.linalg.{DenseMatrix => BDM, DenseVector => BDV}
import breeze.numerics.{exp, inf}
import org.apache.spark.ml.linalg.{Vector, Vectors}

class RBFKernel(private var sigma: Double,
                private val lower: Double = 1e-6,
                private val upper: Double = inf)
  extends TrainDatasetBearingKernel with NoiselessKernel with SameOnDiagonalKernel {

  def this() = this(1)

  override def setHyperparameters(value: BDV[Double]): RBFKernel.this.type = {
    sigma = value(0)
    this
  }

  override def getHyperparameters: BDV[Double] = BDV[Double](sigma)

  override def numberOfHyperparameters: Int = 1

  private def getSigma() = sigma

  private var squaredDistances: Option[BDM[Double]] = None

  override def hyperparameterBoundaries: (BDV[Double], BDV[Double]) = {
    (BDV[Double](lower), BDV[Double](upper))
  }

  override def setTrainingVectors(vectors: Array[Vector]): this.type = {
    super.setTrainingVectors(vectors)
    val sqd = BDM.zeros[Double](vectors.length, vectors.length)
    for (i <- vectors.indices; j <- 0 to i) {
      val dist = Vectors.sqdist(vectors(i), vectors(j))
      sqd(i, j) = dist
      sqd(j, i) = dist
    }

    squaredDistances = Some(sqd)
    this
  }

  override def trainingKernel(): BDM[Double] = {
    val result = squaredDistances.getOrElse(throw new TrainingVectorsNotInitializedException) /
      (-2d * sqr(getSigma()))
    exp.inPlace(result)
    result
  }

  override def trainingKernelAndDerivative(): (BDM[Double], Array[BDM[Double]]) = {
    val sqd = squaredDistances.getOrElse(throw new TrainingVectorsNotInitializedException)
    val kernel = trainingKernel()
    val derivative = sqd *:* kernel
    derivative /= cube(getSigma())

    (kernel, Array(derivative))
  }

  override def crossKernel(test: Array[Vector]): BDM[Double] = {
    val train = getTrainingVectors
    val result = BDM.zeros[Double](test.length, train.length)

    for (i <- test.indices; j <- train.indices)
      result(i, j) = Vectors.sqdist(test(i), train(j)) / (-2d * sqr(getSigma()))

    exp.inPlace(result)

    result
  }

  override def selfKernel(test: Vector): Double = 1d

  private def sqr(x: Double) = x * x

  private def cube(x: Double) = x * x * x

  override def toString = f"RBFKernel(sigma=$sigma%1.1e)"
}
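Note the use of exp.inPlace in trainingKernel and crossKernel: the scaled squared-distance matrix is exponentiated in place rather than copied. A small illustrative sketch of the difference (values arbitrary):

import breeze.linalg.DenseMatrix
import breeze.numerics.exp

val sqd = DenseMatrix((0.0, 4.0), (4.0, 0.0)) // squared distances (arbitrary)
val sigma = 1.0

// Allocating form: '/' builds the scaled matrix, exp builds another.
val kernelCopy = exp(sqd / (-2.0 * sigma * sigma))

// In-place form: '/' builds the scaled matrix, exp reuses it.
val kernel = sqd / (-2.0 * sigma * sigma)
exp.inPlace(kernel)
// kernelCopy == kernel, with one fewer temporary matrix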
Example 5
Source File: Emmax.scala From seqspark with Apache License 2.0
package org.dizhang.seqspark.stat

import breeze.linalg.{DenseMatrix => BDM, CSCMatrix => BSM, DenseVector => BDV, SparseVector => BSV, _}
import breeze.numerics.{exp, pow, log}
import org.apache.spark.mllib.linalg.distributed.{RowMatrix => RM}

trait Emmax {
}

object Emmax {

  def eigenH(K: BDM[Double]): (BDV[Double], BDM[Double]) = {
    val res = eigSym(K)
    (res.eigenvalues - 1.0, res.eigenvectors)
  }

  def eigenSHS(K: BDM[Double], X: BDM[Double]): (BDV[Double], BDM[Double]) = {
    val n = X.rows
    val q = X.cols
    val Xt = X.t
    val S = BDM.eye[Double](n) - X * inv(Xt * X) * Xt
    val res = eigSym(S * (K + BDM.eye[Double](n)) * S)
    (res.eigenvalues(0 until (n - q)) - 1.0, res.eigenvectors(::, 0 until (n - q)))
  }

  def mle(y: BDV[Double], X: BDM[Double], K: BDM[Double],
          ngrids: Int = 100, llim: Int = -10, ulim: Int = 10, esp: Double = 1e-10) = {
    val n: Int = y.length
    val t = K.rows
    val q = X.cols
    val eigH = eigenH(K)
    val eigSHS = eigenSHS(K, X)

    val etas: BDV[Double] = (y.t * eigSHS._2).t
    val logDelta: BDV[Double] =
      BDV((0 to ngrids).map(x => x / ngrids.toDouble * (ulim - llim) + llim): _*)
    val m = logDelta.length
    val delta: BDV[Double] = exp(logDelta)
    val lambdas: BDM[Double] = tile(eigSHS._1, 1, m) + tile(delta, 1, n - q).t
    val xis: BDM[Double] = tile(eigH._1, 1, m) + tile(delta, 1, n).t
    val etasq: BDM[Double] = tile(pow(etas, 2), 1, n - q).t
    val ll: BDV[Double] =
      0.5 * (n.toDouble * (log(n / (2 * math.Pi)) - 1 - log(sum(etasq /:/ lambdas, Axis._0).t)) -
        sum(log(xis), Axis._0).t)
    val dLl: BDV[Double] =
      0.5 * delta *:*
        (n.toDouble * sum(etasq /:/ pow(lambdas, 2), Axis._0).t /:/ sum(etasq /:/ lambdas, Axis._0).t -
          sum(1.0 / xis, Axis._0).t)
  }
}
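In mle, the grid over the variance ratio δ is built on a log scale: logDelta is linearly spaced on [llim, ulim], and exp turns it into a geometrically spaced grid. A tiny illustrative check:

import breeze.linalg.DenseVector
import breeze.numerics.exp

// A linearly spaced log-grid becomes a geometric grid under exp.
val logDelta = DenseVector(-2.0, -1.0, 0.0, 1.0, 2.0)
println(exp(logDelta)) // 0.135..., 0.367..., 1.0, 2.718..., 7.389... (constant ratio e)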
Example 6
Source File: Hedge.scala From banditsbook-scala with MIT License
package com.github.everpeace.banditsbook.algorithm.hedge

import breeze.linalg.Vector._
import breeze.linalg._
import breeze.numerics.exp
import breeze.stats.distributions.{Rand, RandBasis}
import breeze.storage.Zero
import com.github.everpeace.banditsbook.algorithm._
import com.github.everpeace.banditsbook.arm.Arm

import scala.collection.immutable.Seq
import scala.reflect.ClassTag

object Hedge {

  case class State(η: Double, counts: Vector[Int], gains: Vector[Double])

  def Algorithm(η: Double)(implicit zeroReward: Zero[Double], zeroInt: Zero[Int],
                           tag: ClassTag[Double], rand: RandBasis = Rand) = {
    require(η > 0, "η must be positive.")

    new Algorithm[Double, State] {

      override def initialState(arms: Seq[Arm[Double]]): State =
        State(η, zeros(arms.size), zeros(arms.size))

      override def selectArm(arms: Seq[Arm[Double]], state: State): Int = {
        val gains = state.gains
        val η = state.η
        val p = exp(gains / η) / sum(exp(gains / η))
        CategoricalDistribution(p).draw
      }

      override def updateState(arms: Seq[Arm[Double]], state: State,
                               chosen: Int, reward: Double): State = {
        val counts = state.counts
        val gains = state.gains

        val count = counts(chosen) + 1
        counts.update(chosen, count)

        val expectation = gains(chosen) + reward
        gains.update(chosen, expectation)

        state.copy(counts = counts, gains = gains)
      }
    }
  }
}
Example 7
Source File: Exp3.scala From banditsbook-scala with MIT License
package com.github.everpeace.banditsbook.algorithm.exp3

import breeze.linalg.Vector._
import breeze.linalg._
import breeze.numerics.exp
import breeze.stats.distributions.{Rand, RandBasis}
import breeze.storage.Zero
import com.github.everpeace.banditsbook.algorithm._
import com.github.everpeace.banditsbook.arm.Arm

import scala.collection.immutable.Seq
import scala.reflect.ClassTag

object Exp3 {

  case class State(γ: Double, weights: Vector[Double], counts: Vector[Int])

  def Algorithm(γ: Double)(implicit zeroReward: Zero[Double], zeroInt: Zero[Int],
                           tag: ClassTag[Double], rand: RandBasis = Rand) = {
    require(0 < γ && γ <= 1, "γ must be in (0,1]")

    new Algorithm[Double, State] {

      override def initialState(arms: Seq[Arm[Double]]): State =
        State(γ, fill(arms.size)(1.0d), zeros[Int](arms.size))

      override def selectArm(arms: Seq[Arm[Double]], state: State): Int =
        CategoricalDistribution(probs(state.γ, state.weights)).draw()

      override def updateState(arms: Seq[Arm[Double]], state: State,
                               chosen: Int, reward: Double): State = {
        val counts = state.counts
        val weights = state.weights

        val count = counts(chosen) + 1
        counts.update(chosen, count)

        val K = weights.size
        val p = probs(state.γ, weights)
        val x = zeros[Double](K)
        x.update(chosen, reward / p(chosen))
        weights *= exp((state.γ * x) / K.toDouble)

        state.copy(weights = weights, counts = counts)
      }

      private def probs(γ: Double, weights: Vector[Double]): Vector[Double] = {
        val K = weights.size // #arms
        ((1 - γ) * (weights / sum(weights))) + (γ / K)
      }
    }
  }
}
Example 8
Source File: Standard.scala From banditsbook-scala with MIT License
package com.github.everpeace.banditsbook.algorithm.softmax

import breeze.linalg.Vector._
import breeze.linalg._
import breeze.numerics.exp
import breeze.stats.distributions.{Rand, RandBasis}
import breeze.storage.Zero
import com.github.everpeace.banditsbook.algorithm._
import com.github.everpeace.banditsbook.arm.Arm

import scala.collection.immutable.Seq
import scala.reflect.ClassTag

object Standard {

  case class State(τ: Double, counts: Vector[Int], expectations: Vector[Double])

  def Algorithm(τ: Double)(implicit zeroReward: Zero[Double], zeroInt: Zero[Int],
                           tag: ClassTag[Double], rand: RandBasis = Rand) = {
    require(τ > 0, "τ must be positive.")

    new Algorithm[Double, State] {

      override def initialState(arms: Seq[Arm[Double]]): State =
        State(τ, zeros(arms.size), zeros(arms.size))

      override def selectArm(arms: Seq[Arm[Double]], state: State): Int = {
        val expectations = state.expectations
        val τ = state.τ
        val p = exp(expectations / τ) / sum(exp(expectations / τ))
        CategoricalDistribution(p).draw
      }

      override def updateState(arms: Seq[Arm[Double]], state: State,
                               chosen: Int, reward: Double): State = {
        val counts = state.counts
        val expectations = state.expectations

        val count = counts(chosen) + 1
        counts.update(chosen, count)

        val expectation =
          (((count - 1) / count.toDouble) * expectations(chosen)) + ((1 / count.toDouble) * reward)
        expectations.update(chosen, expectation)

        state.copy(counts = counts, expectations = expectations)
      }
    }
  }
}
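Hedge (Example 6) and Standard both exponentiate raw scores directly, as in exp(expectations / τ) / sum(exp(expectations / τ)), which overflows to Infinity once a score divided by the temperature exceeds roughly 709. Example 9 below sidesteps this by subtracting the maximum before calling exp; the same trick on a plain vector, as a hedged sketch (stableSoftmax is a name introduced here, not part of banditsbook-scala):

import breeze.linalg.{max, sum, DenseVector}
import breeze.numerics.exp

// Hypothetical helper: softmax with max-subtraction, so exp never sees a
// large positive argument. Same probabilities, no overflow.
def stableSoftmax(scores: DenseVector[Double], temperature: Double): DenseVector[Double] = {
  val shifted = (scores - max(scores)) / temperature // largest entry becomes 0
  val e = exp(shifted)
  e / sum(e)
}

println(stableSoftmax(DenseVector(1000.0, 999.0, 998.0), 1.0))
// naive exp(1000.0) would overflow; this prints finite probabilities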
Example 9
Source File: SoftmaxClassifier.scala From doddle-model with Apache License 2.0
package io.picnicml.doddlemodel.linear

import breeze.linalg._
import breeze.numerics.{exp, log, pow}
import cats.syntax.option._
import io.picnicml.doddlemodel.data.{Features, RealVector, Simplex, Target}
import io.picnicml.doddlemodel.linear.typeclasses.LinearClassifier
import io.picnicml.doddlemodel.syntax.OptionSyntax._

case class SoftmaxClassifier private (lambda: Float, numClasses: Option[Int],
                                      private val w: Option[RealVector]) {
  private var yPredProbaCache: Simplex = _
}

object SoftmaxClassifier {

  def apply(lambda: Float = 0.0f): SoftmaxClassifier = {
    require(lambda >= 0.0f, "L2 regularization strength must be non-negative")
    SoftmaxClassifier(lambda, none, none)
  }

  private val wSlice: Range.Inclusive = 1 to -1

  @SerialVersionUID(0L)
  implicit lazy val ev: LinearClassifier[SoftmaxClassifier] =
    new LinearClassifier[SoftmaxClassifier] {

      override def numClasses(model: SoftmaxClassifier): Option[Int] = model.numClasses

      override protected def w(model: SoftmaxClassifier): Option[RealVector] = model.w

      override protected[doddlemodel] def copy(model: SoftmaxClassifier,
                                               numClasses: Int): SoftmaxClassifier =
        model.copy(numClasses = numClasses.some)

      override protected def copy(model: SoftmaxClassifier, w: RealVector): SoftmaxClassifier =
        model.copy(w = w.some)

      override protected def predictStateless(model: SoftmaxClassifier,
                                              w: RealVector,
                                              x: Features): Target =
        convert(argmax(predictProbaStateless(model, w, x)(*, ::)), Float)

      override protected def predictProbaStateless(model: SoftmaxClassifier,
                                                   w: RealVector,
                                                   x: Features): Simplex = {
        val z = x * w.asDenseMatrix.reshape(x.cols, model.numClasses.getOrBreak - 1, View.Require)
        val maxZ = max(z)
        val zExpPivot =
          DenseMatrix.horzcat(exp(z - maxZ), DenseMatrix.fill[Float](x.rows, 1)(exp(-maxZ)))
        zExpPivot(::, *) /:/ sum(zExpPivot(*, ::))
      }

      override protected[linear] def lossStateless(model: SoftmaxClassifier,
                                                   w: RealVector,
                                                   x: Features,
                                                   y: Target): Float = {
        model.yPredProbaCache = predictProbaStateless(model, w, x)
        val yPredProbaOfTrueClass = 0 until x.rows map { rowIndex =>
          val targetClass = y(rowIndex).toInt
          model.yPredProbaCache(rowIndex, targetClass)
        }

        val wMatrix = w.asDenseMatrix.reshape(x.cols, model.numClasses.getOrBreak - 1, View.Require)
        sum(log(DenseMatrix(yPredProbaOfTrueClass))) / (-x.rows.toFloat) +
          .5f * model.lambda * sum(pow(wMatrix(wSlice, ::), 2))
      }

      override protected[linear] def lossGradStateless(model: SoftmaxClassifier,
                                                       w: RealVector,
                                                       x: Features,
                                                       y: Target): RealVector = {
        val yPredProba = model.yPredProbaCache(::, 0 to -2)

        val indicator = DenseMatrix.zeros[Float](yPredProba.rows, yPredProba.cols)
        0 until indicator.rows foreach { rowIndex =>
          val targetClass = y(rowIndex).toInt
          if (targetClass < model.numClasses.getOrBreak - 1) indicator(rowIndex, targetClass) = 1.0f
        }

        val grad = (x.t * (indicator - yPredProba)) / (-x.rows.toFloat)
        val wMatrix = w.asDenseMatrix.reshape(x.cols, model.numClasses.getOrBreak - 1, View.Require)
        grad(wSlice, ::) += model.lambda * wMatrix(wSlice, ::)
        grad.toDenseVector
      }
    }
}
Example 10
Source File: PoissonRegression.scala From doddle-model with Apache License 2.0
package io.picnicml.doddlemodel.linear

import breeze.linalg.{all, sum}
import breeze.numerics.{exp, floor, isFinite, log}
import cats.syntax.option._
import io.picnicml.doddlemodel.data.{Features, RealVector, Target}
import io.picnicml.doddlemodel.linear.typeclasses.LinearRegressor

case class PoissonRegression private (lambda: Float, private val w: Option[RealVector]) {
  private var yPredMeanCache: Target = _
}

object PoissonRegression {

  def apply(lambda: Float = 0.0f): PoissonRegression = {
    require(lambda >= 0.0f, "L2 regularization strength must be non-negative")
    PoissonRegression(lambda, none)
  }

  private val wSlice: Range.Inclusive = 1 to -1

  @SerialVersionUID(0L)
  implicit lazy val ev: LinearRegressor[PoissonRegression] =
    new LinearRegressor[PoissonRegression] {

      override protected def w(model: PoissonRegression): Option[RealVector] = model.w

      override protected def copy(model: PoissonRegression): PoissonRegression = model.copy()

      override protected def copy(model: PoissonRegression, w: RealVector): PoissonRegression =
        model.copy(w = w.some)

      override protected def targetVariableAppropriate(y: Target): Boolean =
        y == floor(y) && all(isFinite(y))

      override protected def predictStateless(model: PoissonRegression,
                                              w: RealVector,
                                              x: Features): Target =
        floor(this.predictMean(w, x))

      private def predictMean(w: RealVector, x: Features): Target = exp(x * w)

      override protected[linear] def lossStateless(model: PoissonRegression,
                                                   w: RealVector,
                                                   x: Features,
                                                   y: Target): Float = {
        model.yPredMeanCache = predictMean(w, x)
        sum(y * log(model.yPredMeanCache) - model.yPredMeanCache) / (-x.rows.toFloat) +
          .5f * model.lambda * (w(wSlice).t * w(wSlice))
      }

      override protected[linear] def lossGradStateless(model: PoissonRegression,
                                                       w: RealVector,
                                                       x: Features,
                                                       y: Target): RealVector = {
        val grad = ((model.yPredMeanCache - y).t * x).t / x.rows.toFloat
        grad(wSlice) += model.lambda * w(wSlice)
        grad
      }
    }
}
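Here exp serves as the inverse of the log link: the linear predictor x·w is exponentiated to obtain a strictly positive Poisson mean, which predictStateless then floors to an integer count. A small illustrative sketch with made-up numbers (Double instead of the Float used above, for brevity):

import breeze.linalg.{DenseMatrix, DenseVector}
import breeze.numerics.{exp, floor}

// Illustrative: exp as the Poisson inverse link, mirroring predictMean above.
val x = DenseMatrix((1.0, 0.5), (1.0, 2.0)) // two examples, intercept column first
val w = DenseVector(0.1, 0.7)
val mean = exp(x * w) // positive means: exp of the linear predictor
println(floor(mean))  // integer count predictions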
Example 11
Source File: funcTest.scala From scalaLSTM with Apache License 2.0
package com.xuanyuansen.algo

import breeze.linalg._
import breeze.numerics.exp

object funcTest {
  def main(args: Array[String]) {
    val x = DenseVector.ones[Double](2)
    val y = DenseVector.ones[Double](2)
    println(x)
    println(1.0 - x dot y)
    println(1.0 - x :* y)
    println(DenseVector.vertcat(x, y))

    val Wf = DenseMatrix.rand[Double](4, 2)
    println(Wf)

    val input_dim = 4
    val out_dim = 2
    val concat_len = input_dim + out_dim

    val WW = DenseMatrix.ones[Double](out_dim, concat_len) // 2*6
    val kk = DenseMatrix.ones[Double](concat_len, 1)
    println("WW * kk")
    val tmp = (WW * kk).asInstanceOf[DenseMatrix[Double]]
    println(tmp)

    val diffo = DenseMatrix.ones[Double](1, out_dim)
    val input_h = DenseMatrix.ones[Double](1, out_dim)
    val input_x = DenseMatrix.ones[Double](1, input_dim) + DenseMatrix.ones[Double](1, input_dim)

    // 1*2 1*2
    val dinputh = diffo.t * input_h
    println(dinputh) // 2*2

    // 1*2 1*4
    val dinputx = diffo.t * input_x
    println(dinputx) // 2*4

    val out = DenseMatrix.horzcat(dinputx, dinputh) // concat 2*6
    println(out)

    val softmax = exp(kk)
    println(softmax)
    val sumsoft = sum(softmax)
    val finalout = softmax / sumsoft
    println("finalout " + finalout.toString)

    val test = DenseMatrix.rand[Double](2, 3)
    println(test)
    val idx_max = argmax(test)
    println(idx_max)
    println(max(test))

    val z = DenseMatrix.zeros[Double](2, 3)
    z(idx_max) = 1
    println(z)
    println(sum(exp(test)) - max(test))

    println((0 until 4).reverse)

    val zout = 2.0 * DenseMatrix.ones[Double](2, 3)
    println(zout)
    println(1.0 / zout + 6.0)

    val out1 = DenseMatrix.create(5, 1, Array(1, 2, 3, 4, 5))
    println(out1)
  }
}