Java Code Examples for org.apache.mahout.math.Vector#dot()
The following examples show how to use org.apache.mahout.math.Vector#dot(), which returns the dot product of two vectors. All examples are taken from the pyramid project (Apache License 2.0).
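As a quick orientation before the examples: Vector#dot(Vector) sums the element-wise products of two equal-size vectors. A minimal sketch (the class name and values here are illustrative, not taken from the examples below):

import org.apache.mahout.math.DenseVector;
import org.apache.mahout.math.Vector;

public class DotExample {
    public static void main(String[] args) {
        // two 3-dimensional dense vectors
        Vector a = new DenseVector(new double[]{1.0, 2.0, 3.0});
        Vector b = new DenseVector(new double[]{4.0, 5.0, 6.0});
        // 1*4 + 2*5 + 3*6 = 32
        double dot = a.dot(b);
        System.out.println(dot); // prints 32.0
    }
}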
Example 1
Source File: GeneralF1Predictor.java From pyramid with Apache License 2.0
public static MultiLabel exhaustiveSearch(int numClasses, Matrix lossMatrix, List<Double> probabilities){
    double bestScore = Double.POSITIVE_INFINITY;
    Vector vector = new DenseVector(probabilities.size());
    for (int i=0;i<vector.size();i++){
        vector.set(i,probabilities.get(i));
    }
    // try every label combination and keep the one with the smallest expected loss
    List<MultiLabel> multiLabels = Enumerator.enumerate(numClasses);
    MultiLabel multiLabel = null;
    for (int j=0;j<lossMatrix.numCols();j++){
        Vector column = lossMatrix.viewColumn(j);
        // expected loss = loss column dotted with the probability vector
        double score = column.dot(vector);
        System.out.println("column "+j+", expected loss = "+score);
        if (score < bestScore){
            bestScore = score;
            multiLabel = multiLabels.get(j);
        }
    }
    return multiLabel;
}
Example 2
Source File: KLLoss.java From pyramid with Apache License 2.0
public double getPenalty(){
    double weightSquare = 0.0;
    for (int k=0; k<numClasses; k++) {
        Vector weightVector = cmlcrf.getWeights().getWeightsWithoutBiasForClass(k);
        weightSquare += weightVector.dot(weightVector);
    }
    if (regularizeAll){
        for (int k=0; k<numClasses; k++) {
            double bias = cmlcrf.getWeights().getBiasForClass(k);
            weightSquare += bias*bias;
        }
        Vector labelPairVector = cmlcrf.getWeights().getAllLabelPairWeights();
        weightSquare += labelPairVector.dot(labelPairVector);
    }
    return weightSquare/(2*gaussianPriorVariance);
}
Example 3
Source File: CMLCRFElasticNet.java From pyramid with Apache License 2.0
/**
 * A special backtracking line search for sufficient decrease with an elastic-net penalized model.
 * Reference: "An improved GLMNET for L1-regularized logistic regression".
 * @param searchDirection the descent direction
 * @param gradient the gradient at the current point
 */
private void lineSearch(Vector searchDirection, Vector gradient){
    Vector localSearchDir;
    double initialStepLength = 1;
    double shrinkage = 0.5;
    double c = 1e-4;
    double stepLength = initialStepLength;
    Vector start = cmlcrf.getWeights().getAllWeights();
    double penalty = getPenalty();
    double value = getValue();
    // directional derivative along the search direction
    double product = gradient.dot(searchDirection);
    localSearchDir = searchDirection;
    while(true){
        Vector step = localSearchDir.times(stepLength);
        Vector target = start.plus(step);
        cmlcrf.getWeights().setWeightVector(target);
        double targetPenalty = getPenalty();
        double targetValue = getValue();
        // Armijo-style sufficient decrease condition, adjusted for the penalty change
        if (targetValue <= value + c*stepLength*(product + targetPenalty - penalty)){
            break;
        }
        stepLength *= shrinkage;
    }
}
Example 4
Source File: RegressionSynthesizer.java From pyramid with Apache License 2.0
public static RegDataSet linear(){
    int numData = 50;
    RegDataSet dataSet = RegDataSetBuilder.getBuilder()
            .numDataPoints(numData)
            .numFeatures(16000)
            .dense(true)
            .missingValue(false)
            .build();
    Vector weights = new DenseVector(16000);
    // only the first 4 of 16000 weights are non-zero
    weights.set(0,0.001);
    weights.set(1,0.001);
    weights.set(2,0.001);
    weights.set(3,0.001);
    BernoulliDistribution bernoulliDistribution = new BernoulliDistribution(0.5);
    for (int i=0;i<numData;i++){
        // random +/-1 feature values
        for (int j=0;j<16000;j++){
            int sample = bernoulliDistribution.sample();
            if (sample==0){
                dataSet.setFeatureValue(i,j,-1);
            } else {
                dataSet.setFeatureValue(i,j,1);
            }
        }
        // label is a linear function of the features
        double label = weights.dot(dataSet.getRow(i));
        dataSet.setLabel(i,label);
    }
    return dataSet;
}
Example 5
Source File: CRFLoss.java From pyramid with Apache License 2.0
/**
 * @return negative log-likelihood plus the Gaussian-prior (L2) penalty
 */
@Override
public double getValue() {
    if (isValueCacheValid) {
        return this.value;
    }
    double weightSquare = 0.0;
    for (int k=0; k<numClasses; k++) {
        Vector weightVector = cmlcrf.getWeights().getWeightsWithoutBiasForClass(k);
        weightSquare += weightVector.dot(weightVector);
    }
    if (regularizeAll){
        for (int k=0; k<numClasses; k++) {
            double bias = cmlcrf.getWeights().getBiasForClass(k);
            weightSquare += bias*bias;
        }
        Vector labelPairVector = cmlcrf.getWeights().getAllLabelPairWeights();
        weightSquare += labelPairVector.dot(labelPairVector);
    }
    this.value = getValueForAllData() + weightSquare/(2*gaussianPriorVariance);
    this.isValueCacheValid = true;
    return this.value;
}
Example 6
Source File: CRFF1Loss.java From pyramid with Apache License 2.0
/**
 * @return negative log-likelihood plus the Gaussian-prior (L2) penalty
 */
@Override
public double getValue() {
    if (isValueCacheValid) {
        return this.value;
    }
    double weightSquare = 0.0;
    for (int k=0; k<numClasses; k++) {
        Vector weightVector = cmlcrf.getWeights().getWeightsWithoutBiasForClass(k);
        weightSquare += weightVector.dot(weightVector);
    }
    if (regularizeAll){
        for (int k=0; k<numClasses; k++) {
            double bias = cmlcrf.getWeights().getBiasForClass(k);
            weightSquare += bias*bias;
        }
        Vector labelPairVector = cmlcrf.getWeights().getAllLabelPairWeights();
        weightSquare += labelPairVector.dot(labelPairVector);
        double bmmWeight = cmlcrf.getWeights().getAllWeights().get(numParameters-1);
        weightSquare += bmmWeight*bmmWeight;
    }
    this.value = getValueForAllData() + weightSquare/(2*gaussianPriorVariance);
    this.isValueCacheValid = true;
    return this.value;
}
Example 7
Source File: IMLLogisticLoss.java From pyramid with Apache License 2.0
public double getValue(){
    if (isValueCacheValid){
        return this.value;
    }
    Vector parameters = getParameters();
    // negative log-likelihood plus L2 penalty (Gaussian prior)
    this.value = -1*logisticRegression.dataSetLogLikelihood(dataSet)
            + parameters.dot(parameters)/(2*gaussianPriorVariance);
    this.isValueCacheValid = true;
    return this.value;
}
Example 8
Source File: MLLogisticLoss.java From pyramid with Apache License 2.0
public double getValue(){
    if (isValueCacheValid){
        return this.value;
    }
    Vector parameters = getParameters();
    this.value = -1*mlLogisticRegression.dataSetLogLikelihood(dataSet)
            + parameters.dot(parameters)/(2*gaussianPriorVariance);
    this.isValueCacheValid = true;
    return this.value;
}
Example 9
Source File: ConjugateGradientDescent.java From pyramid with Apache License 2.0
public void iterate(){
    Vector direction = this.oldP;
    lineSearcher.moveAlongDirection(direction);
    Vector newGradient = function.getGradient();
    // Fletcher-Reeves update: beta = ||g_new||^2 / ||g_old||^2
    double beta = newGradient.dot(newGradient)/oldGradient.dot(oldGradient);
    Vector newP = oldP.times(beta).minus(newGradient);
    oldP = newP;
    oldGradient = newGradient;
    terminator.add(function.getValue());
}
Example 10
Source File: LBFGS.java From pyramid with Apache License 2.0
/**
 * Scaling factor for the initial inverse Hessian approximation: gamma = (s.y)/(y.y).
 * @return the scaling factor
 */
double gamma(){
    if (sQueue.isEmpty()){
        return 1;
    }
    Vector s = sQueue.getLast();
    Vector y = yQueue.getLast();
    double denominator = y.dot(y);
    if (denominator<=0){
        return 1;
    }
    return s.dot(y) / denominator;
}
Example 11
Source File: RidgeBinaryLogisticLoss.java From pyramid with Apache License 2.0
public double fun(Vector w) {
    double f = 0;
    Xv(w, scores);
    // ridge term: ||w||^2 / 2
    f += w.dot(w);
    f /= 2.0;
    for (int i = 0; i < numRows; i++) {
        double yz = labels[i] * scores.get(i);
        // numerically stable evaluation of log(1 + exp(-yz))
        if (yz >= 0)
            f += regularization.get(i) * Math.log(1 + Math.exp(-yz));
        else
            f += regularization.get(i) * (-yz + Math.log(1 + Math.exp(yz)));
    }
    return f;
}
Example 12
Source File: ElasticNetLogisticTrainer.java From pyramid with Apache License 2.0
/**
 * A special backtracking line search for sufficient decrease with an elastic-net penalized model.
 * Reference: "An improved GLMNET for L1-regularized logistic regression".
 * @param searchDirection the descent direction
 * @param gradient the gradient at the current point
 */
private void lineSearch(Vector searchDirection, Vector gradient){
    Vector localSearchDir;
    double initialStepLength = 1;
    double shrinkage = 0.5;
    double c = 1e-4;
    double stepLength = initialStepLength;
    Vector start = logisticRegression.getWeights().getAllWeights();
    double penalty = penalty();
    double value = loss(penalty);
    if (logger.isDebugEnabled()){
        logger.debug("start line search");
        logger.debug("initial loss = "+loss());
    }
    // directional derivative along the search direction
    double product = gradient.dot(searchDirection);
    localSearchDir = searchDirection;
    while(true){
        Vector step = localSearchDir.times(stepLength);
        Vector target = start.plus(step);
        logisticRegression.getWeights().setWeightVector(target);
        double targetPenalty = penalty();
        double targetValue = loss(targetPenalty);
        // Armijo-style sufficient decrease condition, adjusted for the penalty change
        if (targetValue <= value + c*stepLength*(product + targetPenalty - penalty)){
            if (logger.isDebugEnabled()){
                logger.debug("step size = "+stepLength);
                logger.debug("final loss = "+targetValue);
                logger.debug("line search done");
            }
            break;
        }
        stepLength *= shrinkage;
    }
}
Example 13
Source File: TrustRegionNewtonOptimizer.java From pyramid with Apache License 2.0
void tron(Vector w) {
    int numColumns = loss.getNumColumns();
    double delta, snorm, one = 1.0;
    double alpha, f, fnew, prered, actred, gs;
    int search = 1, iter = 1;
    Vector w_new = new DenseVector(numColumns);
    Vector g = new DenseVector(numColumns);

    for (int i = 0; i < numColumns; i++)
        w.set(i,0);

    f = loss.fun(w);
    loss.grad(w, g);
    delta = g.norm(2);
    double gnorm1 = delta;
    double gnorm = gnorm1;

    if (gnorm <= eps * gnorm1)
        search = 0;

    iter = 1;
    while (iter <= maxIter && search != 0) {
        // solve the trust-region subproblem by conjugate gradient
        Pair<Vector,Vector> result = trcg(delta, g);
        Vector s = result.getFirst();
        Vector r = result.getSecond();

        for (int j=0;j<w.size();j++){
            w_new.set(j,w.get(j));
        }
        daxpy(one, s, w_new);

        gs = g.dot(s);
        // predicted reduction of the quadratic model
        prered = -0.5 * (gs - s.dot(r));
        fnew = loss.fun(w_new);

        // compute the actual reduction
        actred = f - fnew;

        // on the first iteration, adjust the initial step bound
        snorm = s.norm(2);
        if (iter == 1)
            delta = Math.min(delta, snorm);

        // compute prediction alpha*snorm of the step
        if (fnew - f - gs <= 0)
            alpha = SIGMA3;
        else
            alpha = Math.max(SIGMA1, -0.5 * (gs / (fnew - f - gs)));

        // update the trust-region bound according to the ratio of actual to predicted reduction
        if (actred < ETA0 * prered)
            delta = Math.min(Math.max(alpha, SIGMA1) * snorm, SIGMA2 * delta);
        else if (actred < ETA1 * prered)
            delta = Math.max(SIGMA1 * delta, Math.min(alpha * snorm, SIGMA2 * delta));
        else if (actred < ETA2 * prered)
            delta = Math.max(SIGMA1 * delta, Math.min(alpha * snorm, SIGMA3 * delta));
        else
            delta = Math.max(delta, Math.min(alpha * snorm, SIGMA3 * delta));

        System.out.println("f = "+f);

        if (actred > ETA0 * prered) {
            // accept the step
            iter++;
            for (int j=0;j<w.size();j++){
                w.set(j,w_new.get(j));
            }
            f = fnew;
            loss.grad(w, g);
            gnorm = g.norm(2);
            if (gnorm <= eps * gnorm1)
                break;
        }
        if (f < -1.0e+32) {
            break;
        }
        if (Math.abs(actred) <= 0 && prered <= 0) {
            System.out.println("WARNING: actred and prered <= 0");
            break;
        }
        if (Math.abs(actred) <= 1.0e-12 * Math.abs(f)
                && Math.abs(prered) <= 1.0e-12 * Math.abs(f)) {
            System.out.println("WARNING: actred and prered too small");
            break;
        }
    }
}
Example 14
Source File: TrustRegionNewtonOptimizer.java From pyramid with Apache License 2.0
/**
 * Conjugate gradient solve for the trust-region subproblem.
 * @param delta trust-region radius (input)
 * @param g gradient (input)
 * @return the pair (s, r): the step and the residual
 */
private Pair<Vector,Vector> trcg(double delta, Vector g) {
    int numColumns = loss.getNumColumns();
    double one = 1;
    Vector d = new DenseVector(numColumns);
    Vector Hd = new DenseVector(numColumns);
    double rTr, rnewTrnew, cgtol;
    Vector s = new DenseVector(numColumns);
    Vector r = new DenseVector(numColumns);
    Pair<Vector,Vector> result = new Pair<>();

    for (int i = 0; i < numColumns; i++) {
        s.set(i,0);
        r.set(i,-g.get(i));
        d.set(i,r.get(i));
    }
    cgtol = 0.1 * g.norm(2);

    rTr = r.dot(r);

    while (true) {
        if (r.norm(2) <= cgtol) {
            break;
        }
        loss.Hv(d, Hd);
        double alpha = rTr / d.dot(Hd);
        daxpy(alpha, d, s);
        if (s.norm(2) > delta) {
            // the step left the trust region: back up and move to the boundary
            alpha = -alpha;
            daxpy(alpha, d, s);

            double std = s.dot(d);
            double sts = s.dot(s);
            double dtd = d.dot(d);
            double dsq = delta * delta;
            double rad = Math.sqrt(std * std + dtd * (dsq - sts));
            if (std >= 0)
                alpha = (dsq - sts) / (std + rad);
            else
                alpha = (rad - std) / dtd;
            daxpy(alpha, d, s);
            alpha = -alpha;
            daxpy(alpha, Hd, r);
            break;
        }
        alpha = -alpha;
        daxpy(alpha, Hd, r);
        rnewTrnew = r.dot(r);
        double beta = rnewTrnew / rTr;
        scale(beta, d);
        daxpy(one, r, d);
        rTr = rnewTrnew;
    }

    result.setFirst(s);
    result.setSecond(r);
    return result;
}
Example 15
Source File: LBFGS.java From pyramid with Apache License 2.0
public void iterate(){
    if (logger.isDebugEnabled()){
        logger.debug("start one iteration");
    }
    // make a copy of the gradient; should not keep a pointer to it
    Vector oldGradient = new DenseVector(function.getGradient());
    Vector direction = findDirection();
    if (logger.isDebugEnabled()){
        logger.debug("norm of direction = "+direction.norm(2));
    }
    BackTrackingLineSearcher.MoveInfo moveInfo = lineSearcher.moveAlongDirection(direction);
    Vector s = moveInfo.getStep();
    Vector newGradient = function.getGradient();
    Vector y = newGradient.minus(oldGradient);
    // curvature condition: y.dot(s) must be positive for a valid update;
    // it can fail due to round-off errors and an ill-conditioned inverse Hessian
    double denominator = y.dot(s);
    double rho = 0;
    if (denominator>0){
        rho = 1/denominator;
    } else {
        terminator.forceTerminate();
        if (logger.isWarnEnabled()){
            logger.warn("denominator <= 0, force to terminate");
        }
    }
    if (logger.isDebugEnabled()){
        if (y.size()<100){
            logger.debug("y= "+y);
            logger.debug("s= "+s);
        }
        logger.debug("denominator = "+denominator);
        logger.debug("rho = "+rho);
    }
    sQueue.add(s);
    yQueue.add(y);
    rhoQueue.add(rho);
    // keep only the m most recent correction pairs
    if (sQueue.size()>m){
        sQueue.remove();
        yQueue.remove();
        rhoQueue.remove();
    }
    double value = function.getValue();
    terminator.add(value);
    if (logger.isDebugEnabled()){
        logger.debug("finish one iteration. loss = "+value);
    }
}
Example 16
Source File: Vectors.java From pyramid with Apache License 2.0
public static double cosine(Vector vector1, Vector vector2){
    // cosine similarity = (v1 . v2) / (||v1|| * ||v2||)
    double prod = vector1.dot(vector2);
    return prod/(vector1.norm(2)*vector2.norm(2));
}
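For context, here is a small self-contained sketch of the same cosine formula evaluated directly on Mahout vectors (the class name and values are illustrative):

import org.apache.mahout.math.DenseVector;
import org.apache.mahout.math.Vector;

public class CosineDemo {
    public static void main(String[] args) {
        Vector v1 = new DenseVector(new double[]{1.0, 0.0});
        Vector v2 = new DenseVector(new double[]{1.0, 1.0});
        // same formula as Vectors.cosine above: dot / (norm * norm)
        double sim = v1.dot(v2) / (v1.norm(2) * v2.norm(2));
        System.out.println(sim); // ~0.7071 (45-degree angle)
    }
}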