breeze.linalg.max Scala Examples

The following examples show how to use breeze.linalg.max. Each one is taken from an open-source project; the source file, project, and license are noted above each example.
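Before the examples, a quick orientation: these projects exercise a handful of distinct `max` overloads. A minimal sketch of those overloads (variable names are illustrative; each call form appears in at least one example below):

import breeze.linalg.{Axis, DenseMatrix, DenseVector, max}

val v = DenseVector(1.0, 4.0, 2.0)
val m = DenseMatrix((1.0, 5.0), (3.0, 2.0))

max(v)          // 4.0: the largest element of a vector
max(2.0, 3.0)   // 3.0: the larger of two scalars
max(v, 2.5)     // DenseVector(2.5, 4.0, 2.5): elementwise max against a scalar
max(m, Axis._0) // per-column maxima as a row vector (Transpose): [3.0, 5.0]
max(m, Axis._1) // DenseVector(5.0, 3.0): per-row maxima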
Example 1
Source File: IndependentPixelEvaluator.scala    From scalismo-faces   with Apache License 2.0
package scalismo.faces.sampling.face.evaluators

import breeze.linalg.max
import scalismo.color.{RGB, RGBA}
import scalismo.faces.image.PixelImage
import scalismo.faces.sampling.face.evaluators.PixelEvaluators.IsotropicGaussianPixelEvaluator
import scalismo.sampling.DistributionEvaluator
import scalismo.sampling.evaluators.{GaussianEvaluator, PairEvaluator}

// The IndependentPixelEvaluator class and the companion object's apply method are
// elided in this excerpt; the factory below pairs a Gaussian foreground likelihood
// with a constant background likelihood.
object IndependentPixelEvaluator {
  def isotropicGaussianConstantBackground(targetImage: PixelImage[RGBA],
                                          sdev: Double,
                                          bgSdev: Double): DistributionEvaluator[PixelImage[RGBA]] = {
    // standardized foreground evaluator, z transform to a std normal
    val pixelEvaluator: PairEvaluator[RGB] = new PairEvaluator[RGB] {
      override def logValue(first: RGB, second: RGB): Double = {
        val diff = (first - second).norm/sdev
        GaussianEvaluator.logDensity(diff, 0.0, 1.0)
      }
    }
    // background likelihood value at required distance in standard deviations
    val bgValue = GaussianEvaluator.logDensity(bgSdev, 0.0, 1.0)
    val bgEval = PixelEvaluators.ConstantPixelEvaluator[RGB](bgValue)
    IndependentPixelEvaluator(targetImage, pixelEvaluator, bgEval)
  }
} 
Example 2
Source File: LabeledIndependentPixelEvaluator.scala    From scalismo-faces   with Apache License 2.0
package scalismo.faces.sampling.face.evaluators

import breeze.linalg.max
import scalismo.color.{RGB, RGBA}
import scalismo.faces.image.{LabeledPixelImage, PixelImage}
import scalismo.sampling.DistributionEvaluator
import scalismo.sampling.evaluators.PairEvaluator


class LabeledIndependentPixelEvaluator(val reference: PixelImage[RGBA], val pixelEvaluator: PairEvaluator[RGB], val bgEvaluator: DistributionEvaluator[RGB])
  extends DistributionEvaluator[LabeledPixelImage[RGBA]] {
  override def logValue(sample: LabeledPixelImage[RGBA]): Double = {
    require(sample.label.domain == reference.domain, "LabeledIndependentPixelEvaluator: images must be comparable! (different sizes)")

    // ugly while for better performance, was a nice zip/map/case before :(
    var sum: Double = 0.0
    var x: Int = 0

    // Equation 2
    while(x < reference.width) {
      var y: Int = 0
      while (y < reference.height) {
        val refCol: RGB = reference(x, y).toRGB
        val bg: Double = bgEvaluator.logValue(refCol)
        val smp: RGBA = sample.image(x, y)

        if (sample.label(x, y) == 1) {
          // this pixel is labeled as face
          if (smp.a > 1e-4) {
            // the pixel is covered by the face model: Equation 4
            val fg: Double = pixelEvaluator.logValue(refCol, smp.toRGB)
            sum += fg
          }
          else
            sum += bg // pixel is not covered by the face model
        }
        else {
          // this pixel is labeled as non-face: Equations 5 and 7
          if (smp.a > 1e-4) {
            // the pixel is covered by the face model
            sum += max(bg, pixelEvaluator.logValue(refCol, smp.toRGB))
          }
          else
            sum += bg // the pixel is not covered by the face model
        }
        y += 1
      }
      x += 1
    }
    sum
  }
  override def toString = {
    val builder = new StringBuilder(128)
    builder ++= "LabeledIndependentPixelEvaluator("
    builder ++= pixelEvaluator.toString
    builder ++= "/"
    builder ++= bgEvaluator.toString
    builder ++= ")"
    builder.mkString
  }
}

object LabeledIndependentPixelEvaluator {
  def apply(reference: PixelImage[RGBA], pixelEvaluator: PairEvaluator[RGB],  bgEvaluator: DistributionEvaluator[RGB]) = new LabeledIndependentPixelEvaluator(reference, pixelEvaluator, bgEvaluator)
} 
Example 3
Source File: QuantileSummarizer.scala    From flint   with Apache License 2.0
package com.twosigma.flint.rdd.function.summarize.summarizer.subtractable

import breeze.linalg.max
import org.apache.commons.math3.stat.descriptive.rank.Percentile

import scala.reflect.ClassTag


case class QuantileSummarizer(
  p: Array[Double]
) extends LeftSubtractableSummarizer[Double, SequentialArrayQueue[Double], Array[Double]] {

  require(p.nonEmpty, "The list of quantiles must be non-empty.")

  override def zero(): SequentialArrayQueue[Double] = new SequentialArrayQueue[Double]()

  override def merge(
    u1: SequentialArrayQueue[Double],
    u2: SequentialArrayQueue[Double]
  ): SequentialArrayQueue[Double] = {
    u1.addAll(u2)
    u1
  }

  override def render(u: SequentialArrayQueue[Double]): Array[Double] = {
    // Using R-7 to be consistent with Pandas. See https://en.wikipedia.org/wiki/Quantile
    val percentileEstimator =
      new Percentile().withEstimationType(Percentile.EstimationType.R_7)
    val (begin, end, values) = u.view()
    percentileEstimator.setData(values, begin, u.size)
    // Convert scale from (0.0, 1.0] to (0.0, 100.0]
    p.map { x =>
      percentileEstimator.evaluate(x * 100.0)
    }
  }

  override def add(u: SequentialArrayQueue[Double], t: Double): SequentialArrayQueue[Double] = {
    u.add(t)
    u
  }

  override def subtract(u: SequentialArrayQueue[Double], t: Double): SequentialArrayQueue[Double] = {
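    // Assumes SequentialArrayQueue.remove() pops the oldest (leftmost) value, which
    // for a left-subtractable summarizer is exactly the element t being subtracted.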
    u.remove()
    u
  }
} 
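A minimal usage sketch, assuming only the API shown above (zero, add, render); within flint these methods are normally driven by the summarize machinery rather than called by hand:

val summarizer = QuantileSummarizer(Array(0.25, 0.5, 0.75))
val state = Seq(3.0, 1.0, 4.0, 1.5).foldLeft(summarizer.zero())(summarizer.add)
val quartiles: Array[Double] = summarizer.render(state) // R-7 quantile estimates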
Example 4
Source File: SVMKernelMatrix.scala    From DynaML   with Apache License 2.0
package io.github.mandar2812.dynaml.kernels

import breeze.linalg.{DenseMatrix, DenseVector, eig, max, min}
import org.apache.log4j.{Logger, Priority}

// Excerpt from SVMKernelMatrix; the class declaration is elided here. The class
// provides the Gram matrix `kernel: DenseMatrix[Double]` and a log4j `logger`.
  override def eigenDecomposition(dimensions: Int = this.dimension.toInt):
  (DenseVector[Double], DenseMatrix[Double]) = {
    logger.log(Priority.INFO, "Eigenvalue decomposition of the kernel matrix using JBlas.")
    val decomp = eig(this.kernel)
    logger.log(Priority.INFO, "Eigenvalue stats: "
      +min(decomp.eigenvalues)
      +" =< lambda =< "
      +max(decomp.eigenvalues)
    )
    (decomp.eigenvalues, decomp.eigenvectors)

  }

} 
Example 5
Source File: NonNegAdj.scala    From spectrallda-tensorspark   with Apache License 2.0
package edu.uci.eecs.spectralLDA.utils

import breeze.linalg.{DenseMatrix, DenseVector, max, min}

import scala.util.control.Breaks._
import scalaxy.loops._
import scala.language.postfixOps

object NonNegativeAdjustment {
  
  def simplexProj(V: DenseVector[Double]): (DenseVector[Double], Double) = {
    // val z:Double = 1.0
    val len: Int = V.length
    val U: DenseVector[Double] = DenseVector(V.copy.toArray.sortWith(_ > _))
    val cums: DenseVector[Double] = DenseVector(AlgebraUtil.Cumsum(U.toArray).map(x => x-1))
    // 1/i for i = 1..len (must match the length of cums for the elementwise product)
    val Index: DenseVector[Double] = DenseVector((1 to len).toArray.map(x => 1.0/x.toDouble))
    val InterVec: DenseVector[Double] = cums :* Index
    val TobefindMax: DenseVector[Double] = U - InterVec
    var maxIndex : Int = 0
    // find maxIndex
    breakable{
      for (i <- 0 until len optimized){
        if (TobefindMax(len - i - 1) > 0){
          maxIndex = len - i - 1
          break()
        }
      }
    }
    val theta: Double = InterVec(maxIndex)
    val P_norm: DenseVector[Double] = max(V - theta, 0.0)
    (P_norm, theta)
  }
} 
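simplexProj is the standard Euclidean projection onto the probability simplex: the returned vector is non-negative and sums to one, and theta is the threshold that was subtracted. A small sanity-check sketch (input values are made up):

import breeze.linalg.{DenseVector, min, sum}

val (projected, theta) = NonNegativeAdjustment.simplexProj(DenseVector(0.4, 1.1, -0.2))
assert(min(projected) >= 0.0)                 // non-negative entries
assert(math.abs(sum(projected) - 1.0) < 1e-9) // entries sum to 1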
Example 6
Source File: package.scala    From hail   with MIT License
package is.hail

import is.hail.stats._
import breeze.linalg.{Vector, DenseVector, max, sum}
import breeze.numerics._
import is.hail.utils._

package object experimental {

  def findMaxAC(af: Double, an: Int, ci: Double = .95): Int = {
    if (af == 0)
      0
    else {
      val quantile_limit = ci // ci for one-sided, 1-(1-ci)/2 for two-sided
      val max_ac = qpois(quantile_limit, an * af)
      max_ac
    }
  }

  def calcFilterAlleleFreq(ac: Int, an: Int, ci: Double = .95, lower: Double = 1e-10, upper: Double = 2, tol: Double = 1e-7, precision: Double = 1e-6): Double = {
    if (ac <= 1 || an == 0) // FAF should not be calculated on singletons
      0.0
    else {
      val f = (af: Double) => ac.toDouble - 1 - qpois(ci, an.toDouble * af)
      val root = uniroot(f, lower, upper, tol)
      val rounder = 1d / (precision / 100d)
      var max_af = math.round(root.getOrElse(0.0) * rounder) / rounder
      while (findMaxAC(max_af, an, ci) < ac) {
        max_af += precision
      }
      max_af - precision
    }
  }

  def calcFilterAlleleFreq(ac: Int, an: Int, ci: Double): Double = calcFilterAlleleFreq(ac, an, ci, lower = 1e-10, upper = 2, tol = 1e-7, precision = 1e-6)


  def haplotypeFreqEM(gtCounts : IndexedSeq[Int]) : IndexedSeq[Double] = {

    assert(gtCounts.size == 9, "haplotypeFreqEM requires genotype counts for the 9 possible genotype combinations.")

    val _gtCounts = new DenseVector(gtCounts.toArray)
    val nSamples = sum(_gtCounts)

    //Needs some non-ref samples to compute
    if(_gtCounts(0) >= nSamples){ return FastIndexedSeq(_gtCounts(0),0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0)}

    val nHaplotypes = 2.0*nSamples.toDouble

    
    val const_counts = new DenseVector(Array[Double](
      2.0*_gtCounts(0) + _gtCounts(1) + _gtCounts(3), //n.AB
      2.0*_gtCounts(6) + _gtCounts(3) + _gtCounts(7), //n.Ab
      2.0*_gtCounts(2) + _gtCounts(1) + _gtCounts(5), //n.aB
      2.0*_gtCounts(8) + _gtCounts(5) + _gtCounts(7)  //n.ab
    ))

    //Initial estimate with AaBb contributing equally to each haplotype
    var p_next = (const_counts +:+ new DenseVector(Array.fill[Double](4)(_gtCounts(4)/2.0))) /:/ nHaplotypes
    var p_cur = p_next +:+ 1.0

    //EM
    while(max(abs(p_next -:- p_cur)) > 1e-7){
      p_cur = p_next

      p_next = (const_counts +:+
        (new DenseVector(Array[Double](
          p_cur(0)*p_cur(3), //n.AB
          p_cur(1)*p_cur(2), //n.Ab
          p_cur(1)*p_cur(2), //n.aB
          p_cur(0)*p_cur(3)  //n.ab
        )) * (_gtCounts(4) / ((p_cur(0)*p_cur(3))+(p_cur(1)*p_cur(2)))))
        ) / nHaplotypes

    }

    return (p_next *:* nHaplotypes).toArray.toFastIndexedSeq
  }

} 
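A usage sketch for haplotypeFreqEM with made-up counts; the ordering of the nine genotype combinations is inferred from the const_counts indexing above. Note that, despite the name, the function returns estimated haplotype counts (frequencies scaled by the number of haplotypes), in the order AB, Ab, aB, ab:

import is.hail.experimental._

// 3x3 genotype counts for two biallelic sites, flattened:
// AABB, AaBB, aaBB, AABb, AaBb, aaBb, AAbb, Aabb, aabb
val gtCounts = IndexedSeq(50, 10, 1, 8, 5, 0, 2, 1, 0)
val hapCounts = haplotypeFreqEM(gtCounts) // counts of AB, Ab, aB, ab haplotypes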
Example 7
Source File: RankingMetrics.scala    From doddle-model   with Apache License 2.0
package io.picnicml.doddlemodel.metrics

import breeze.linalg.{DenseMatrix, convert, linspace, max, min}
import io.picnicml.doddlemodel.data.{RealVector, Target, numberOfTargetClasses}

import scala.collection.compat.immutable.ArraySeq

object RankingMetrics {

  
  def rocCurve(y: Target, yPredProba: RealVector, length: Int = 30): RocCurve = {
    require(length >= 5, "Number of points of the ROC-curve must be at least 5")
    require(numberOfTargetClasses(y) == 2, "ROC-curve is defined for a binary classification task")
    require(min(yPredProba) >= 0 && max(yPredProba) <= 1, "Currently ROC-curve is only defined for probability scores")

    val yPositive = y :== 1.0f
    val yNegative = !yPositive

    def fprTpr(threshold: Float): Array[Float] = {
      val yPredPositive =
        if (threshold == 0.0f) {
          // predict 1.0 if predicted probability is 0.0 to obtain coordinate (1, 1)
          (yPredProba >:> threshold) |:| (yPredProba :== threshold)
        }
        else {
          yPredProba >:> threshold
        }

      val numTp = (yPredPositive &:& yPositive).activeSize
      val numFp = (yPredPositive &:& yNegative).activeSize
      Array(numFp / yNegative.activeSize.toFloat, numTp / yPositive.activeSize.toFloat)
    }

    val thresholds = convert(linspace(1.0, 0.0, length), Float)
    val coordinates = DenseMatrix(ArraySeq.unsafeWrapArray(thresholds.toArray.map(threshold => fprTpr(threshold))):_*)
    RocCurve(coordinates(::, 0), coordinates(::, 1), thresholds)
  }
} 
Example 8
Source File: RangeScaler.scala    From doddle-model   with Apache License 2.0
package io.picnicml.doddlemodel.preprocessing

import breeze.linalg.{Axis, max, min}
import cats.syntax.option._
import io.picnicml.doddlemodel.data.Feature.FeatureIndex
import io.picnicml.doddlemodel.data.{Features, RealVector}
import io.picnicml.doddlemodel.syntax.OptionSyntax._
import io.picnicml.doddlemodel.typeclasses.Transformer

case class RangeScaler private (private val scale: Option[RealVector],
                                private val minAdjustment: Option[RealVector],
                                private val range: (Float, Float),
                                private val featureIndex: FeatureIndex)


object RangeScaler {

  def apply(range: (Float, Float), featureIndex: FeatureIndex): RangeScaler = {
    val (lowerBound, upperBound) = range
    require(upperBound > lowerBound, "Upper bound of range must be greater than lower bound")
    RangeScaler(none, none, range, featureIndex)
  }

  @SerialVersionUID(0L)
  implicit lazy val ev: Transformer[RangeScaler] = new Transformer[RangeScaler] {

    override def isFitted(model: RangeScaler): Boolean =
      model.scale.isDefined && model.minAdjustment.isDefined

    override def fit(model: RangeScaler, x: Features): RangeScaler = {
      val (lowerBound, upperBound) = model.range
      val numericColIndices = model.featureIndex.numerical.columnIndices
      val colMax = max(x(::, numericColIndices), Axis._0).t.toDenseVector
      val colMin = min(x(::, numericColIndices), Axis._0).t.toDenseVector
      val dataRange = colMax - colMin
      // avoid division by zero for constant features (max == min)
      dataRange(dataRange :== 0.0f) := 1.0f

      val scale = (upperBound - lowerBound) / dataRange
      val minAdjustment = lowerBound - (colMin *:* scale)

      model.copy(scale.some, minAdjustment.some)
    }

    override protected def transformSafe(model: RangeScaler, x: Features): Features = {
      val xCopy = x.copy
      val scale = model.scale.getOrBreak
      val minAdjustment = model.minAdjustment.getOrBreak
      model.featureIndex.numerical.columnIndices.zipWithIndex.foreach {
        case (colIndex, idx) =>
          xCopy(::, colIndex) := (xCopy(::, colIndex) *:* scale(idx)) +:+ minAdjustment(idx)
      }

      xCopy
    }
  }
} 
Example 9
Source File: OneHotEncoder.scala    From doddle-model   with Apache License 2.0
package io.picnicml.doddlemodel.preprocessing

import breeze.linalg.{*, Axis, DenseMatrix, Vector, convert, max}
import cats.syntax.option._
import io.picnicml.doddlemodel.data.Feature.FeatureIndex
import io.picnicml.doddlemodel.data.Features
import io.picnicml.doddlemodel.syntax.OptionSyntax._
import io.picnicml.doddlemodel.typeclasses.Transformer



case class OneHotEncoder private (private val numBinaryColumns: Option[Vector[Int]],
                                  private val featureIndex: FeatureIndex)

object OneHotEncoder {

  def apply(featureIndex: FeatureIndex): OneHotEncoder = OneHotEncoder(none, featureIndex)

  @SerialVersionUID(0L)
  implicit lazy val ev: Transformer[OneHotEncoder] = new Transformer[OneHotEncoder] {

    @inline override def isFitted(model: OneHotEncoder): Boolean = model.numBinaryColumns.isDefined

    override def fit(model: OneHotEncoder, x: Features): OneHotEncoder = {
      val numBinaryColumns = convert(max(x(::, model.featureIndex.categorical.columnIndices).apply(::, *)).t, Int) + 1
      model.copy(numBinaryColumns = numBinaryColumns.some)
    }

    override protected def transformSafe(model: OneHotEncoder, x: Features): Features = {
      val xTransformed = model.featureIndex.categorical.columnIndices.zipWithIndex.foldLeft(x) {
        case (xTransformedCurrent, (colIndex, statisticIndex)) =>
          appendEncodedColumns(xTransformedCurrent, colIndex, model.numBinaryColumns.getOrBreak(statisticIndex))
      }
      xTransformed.delete(model.featureIndex.categorical.columnIndices, Axis._1)
    }

    private def appendEncodedColumns(x: Features, columnIndex: Int, numEncodedColumns: Int): Features = {
      val encoded = DenseMatrix.zeros[Float](x.rows, numEncodedColumns)
      convert(x(::, columnIndex), Int).iterator.foreach { case (rowIndex, colIndex) =>
        // if value is larger than the maximum value encountered during training it is ignored,
        // i.e. no value is set in the binary encoded matrix
        if (colIndex < numEncodedColumns) encoded(rowIndex, colIndex) = 1.0f
      }
      DenseMatrix.horzcat(x, encoded)
    }
  }
} 
Example 10
Source File: Norms.scala    From doddle-model   with Apache License 2.0
package io.picnicml.doddlemodel.preprocessing

import breeze.linalg.{Axis, max, sum}
import breeze.numerics.{abs, pow, sqrt}
import io.picnicml.doddlemodel.data.{Features, RealVector}

object Norms {

  sealed trait Norm {
    def apply(x: Features): RealVector
  }

  final case object L1Norm extends Norm {
    override def apply(x: Features): RealVector = sum(abs(x), Axis._1)
  }

  final case object L2Norm extends Norm {
    override def apply(x: Features): RealVector = sqrt(sum(pow(x, 2), Axis._1))
  }

  final case object MaxNorm extends Norm {
    override def apply(x: Features): RealVector = max(abs(x), Axis._1)
  }
} 
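A usage sketch, assuming Features is breeze's DenseMatrix[Float] (as in doddle-model's data package): each norm maps an n x m feature matrix to a length-n vector of row norms, which can then be used to normalize rows.

import breeze.linalg.DenseMatrix
import io.picnicml.doddlemodel.preprocessing.Norms.{L2Norm, MaxNorm}

val x = DenseMatrix((3.0f, 4.0f), (-1.0f, 2.0f))
L2Norm(x)  // DenseVector(5.0, ~2.236): L2 norm of each row
MaxNorm(x) // DenseVector(4.0, 2.0): largest absolute value in each row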
Example 11
Source File: Forest.scala    From Clustering4Ever   with Apache License 2.0
package org.clustering4ever.scala.umap

import breeze.linalg.DenseMatrix

// Excerpt from the Forest class (declaration elided); `trees` is the forest's
// collection of random-projection trees, each exposing a matrix of leaf `indices`.

  def leafArray : DenseMatrix[Int] = {
    trees.size match {
      case 0 => - DenseMatrix.eye[Int](1) // empty forest: a 1x1 sentinel matrix of -1
      case _ => {
        @annotation.tailrec
        def concat(array: DenseMatrix[Int], i: Int): DenseMatrix[Int] = {
          if (i >= trees.size) {
            array
          }
          else {
            concat(DenseMatrix.vertcat(array, trees(i).indices), i + 1)
          }
        }
        concat(trees(0).indices, 1)
      }
    }
  }
}