Java Code Examples for org.apache.commons.math3.linear.RealVector#add()
The following examples show how to use
org.apache.commons.math3.linear.RealVector#add().
The examples are drawn from open-source projects; the originating project, source file, and license are noted above each example.
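Before the project examples, here is a minimal, self-contained sketch of the call itself (the vector values are invented for the demo). Note that add() is non-mutating: it returns a new vector and leaves both operands unchanged.

import org.apache.commons.math3.linear.ArrayRealVector;
import org.apache.commons.math3.linear.RealVector;

public class AddDemo {
    public static void main(String[] args) {
        RealVector a = new ArrayRealVector(new double[] {1.0, 2.0, 3.0});
        RealVector b = new ArrayRealVector(new double[] {4.0, 5.0, 6.0});
        RealVector c = a.add(b);   // element-wise sum; a and b are unchanged
        System.out.println(c);     // {5; 7; 9}
    }
}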
Example 1
Source File: SumVectorComposer.java From Indra with MIT License
@Override
public RealVector compose(List<RealVector> vectors) {
    if (vectors.isEmpty()) {
        return null;
    } else if (vectors.size() == 1) {
        return vectors.get(0);
    } else {
        RealVector sum = vectors.get(0).add(vectors.get(1));
        for (int i = 2; i < vectors.size(); i++) {
            sum = sum.add(vectors.get(i));
        }
        return sum;
    }
}
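Because RealVector::add is an associative binary operator, the same fold can be written as a stream reduction. A minimal sketch (not part of the Indra source; returning Optional instead of null is a deliberate deviation for the example):

import java.util.List;
import java.util.Optional;
import org.apache.commons.math3.linear.RealVector;

public final class StreamSum {
    // Equivalent reduction; yields Optional.empty() for an empty list
    // where the composer above returns null.
    static Optional<RealVector> sum(List<RealVector> vectors) {
        return vectors.stream().reduce(RealVector::add);
    }
}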
Example 2
Source File: MinCovDet.java From macrobase with Apache License 2.0
private RealVector getMean(List<Datum> data) {
    RealVector vec = null;
    for (Datum d : data) {
        RealVector dvec = d.metrics();
        if (vec == null) {
            vec = dvec;
        } else {
            vec = vec.add(dvec);
        }
    }
    return vec.mapDivide(data.size());
}
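As written, getMean throws a NullPointerException when data is empty. A minimal defensive variant, sketched against plain RealVector lists rather than macrobase's Datum type:

import java.util.List;
import org.apache.commons.math3.linear.RealVector;

// Mean of a non-empty list of equal-length vectors.
static RealVector mean(List<RealVector> data) {
    if (data.isEmpty()) {
        throw new IllegalArgumentException("data must not be empty");
    }
    RealVector sum = data.get(0);
    for (int i = 1; i < data.size(); i++) {
        sum = sum.add(data.get(i)); // add() allocates a new vector each pass
    }
    return sum.mapDivide(data.size());
}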
Example 3
Source File: GMMTrainer.java From pyramid with Apache License 2.0
private RealVector computeMean(int k, double sumGamma) {
    RealVector res = new ArrayRealVector(data.getColumnDimension());
    for (int i = 0; i < data.getRowDimension(); i++) {
        res = res.add(data.getRowVector(i).mapMultiply(gammas[i][k]));
    }
    return res.mapDivide(sumGamma);
}
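One caveat when accumulating row vectors this way: add() requires both operands to have the same dimension and throws a DimensionMismatchException otherwise. A quick, self-contained sketch of that failure mode:

import org.apache.commons.math3.exception.DimensionMismatchException;
import org.apache.commons.math3.linear.ArrayRealVector;
import org.apache.commons.math3.linear.RealVector;

public class MismatchDemo {
    public static void main(String[] args) {
        RealVector a = new ArrayRealVector(new double[] {1.0, 2.0});
        RealVector b = new ArrayRealVector(new double[] {1.0, 2.0, 3.0});
        try {
            a.add(b); // dimensions 2 and 3 do not match
        } catch (DimensionMismatchException e) {
            System.out.println("dimensions differ: " + e.getMessage());
        }
    }
}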
Example 4
Source File: SDAR2D.java From incubator-hivemall with Apache License 2.0
/**
 * @param x series of input in LIFO order
 * @param k AR window size
 * @return x_hat predicted x
 * @link https://en.wikipedia.org/wiki/Matrix_multiplication#Outer_product
 */
@Nonnull
public RealVector update(@Nonnull final ArrayRealVector[] x, final int k) {
    Preconditions.checkArgument(x.length >= 1,
        "x.length MUST be greater than or equal to 1: " + x.length);
    Preconditions.checkArgument(k >= 0, "k MUST be greater than or equal to 0: ", k);
    Preconditions.checkArgument(k < _C.length,
        "k MUST be less than |C| but k=" + k + ", |C|=" + _C.length);

    final ArrayRealVector x_t = x[0];
    final int dims = x_t.getDimension();

    if (_initialized == false) {
        this._mu = x_t.copy();
        this._sigma = new BlockRealMatrix(dims, dims);
        assert (_sigma.isSquare());
        this._initialized = true;
        return new ArrayRealVector(dims);
    }
    Preconditions.checkArgument(k >= 1, "k MUST be greater than 0: ", k);

    // old parameters are accessible to compute the Hellinger distance
    this._muOld = _mu.copy();
    this._sigmaOld = _sigma.copy();

    // update mean vector
    // \hat{mu} := (1-r) \hat{µ} + r x_t
    this._mu = _mu.mapMultiply(1.d - _r).add(x_t.mapMultiply(_r));

    // compute residuals (x - \hat{µ})
    final RealVector[] xResidual = new RealVector[k + 1];
    for (int j = 0; j <= k; j++) {
        xResidual[j] = x[j].subtract(_mu);
    }

    // update covariance matrices
    // C_j := (1-r) C_j + r (x_t - \hat{µ}) (x_{t-j} - \hat{µ})'
    final RealMatrix[] C = this._C;
    final RealVector rxResidual0 = xResidual[0].mapMultiply(_r); // r (x_t - \hat{µ})
    for (int j = 0; j <= k; j++) {
        RealMatrix Cj = C[j];
        if (Cj == null) {
            C[j] = rxResidual0.outerProduct(x[j].subtract(_mu));
        } else {
            C[j] = Cj.scalarMultiply(1.d - _r)
                     .add(rxResidual0.outerProduct(x[j].subtract(_mu)));
        }
    }

    // solve A in the following Yule-Walker equation
    // C_j = ∑_{i=1}^{k} A_i C_{j-i} where j = 1..k, C_{-i} = C_i'
    /*
     * /C_1\   /A_1\   /C_0     |C_1'    |C_2'    | .  .  . |C_{k-1}' \
     * |---|   |---|   |--------+--------+--------+         +---------|
     * |C_2|   |A_2|   |C_1     |C_0     |C_1'    |             .     |
     * |---|   |---|   |--------+--------+--------+             .     |
     * |C_3| = |A_3|   |C_2     |C_1     |C_0     |             .     |
     * | . |   | . |   |--------+--------+--------+                   |
     * | . |   | . |   |   .                          .               |
     * | . |   | . |   |   .                          .               |
     * |---|   |---|   |--------+                           +---------|
     * \C_k/   \A_k/   \C_{k-1} | .  .  .                   |C_0      /
     */
    RealMatrix[][] rhs = MatrixUtils.toeplitz(C, k);
    RealMatrix[] lhs = Arrays.copyOfRange(C, 1, k + 1);
    RealMatrix R = MatrixUtils.combinedMatrices(rhs, dims);
    RealMatrix L = MatrixUtils.combinedMatrices(lhs);
    RealMatrix A = MatrixUtils.solve(L, R, false);

    // estimate x
    // \hat{x} = \hat{µ} + ∑_{i=1}^k A_i (x_{t-i} - \hat{µ})
    RealVector x_hat = _mu.copy();
    for (int i = 0; i < k; i++) {
        int offset = i * dims;
        RealMatrix Ai = A.getSubMatrix(offset, offset + dims - 1, 0, dims - 1);
        x_hat = x_hat.add(Ai.operate(xResidual[i + 1]));
    }

    // update model covariance
    // ∑ := (1-r) ∑ + r (x - \hat{x}) (x - \hat{x})'
    RealVector xEstimateResidual = x_t.subtract(x_hat);
    this._sigma = _sigma.scalarMultiply(1.d - _r)
                        .add(xEstimateResidual.mapMultiply(_r).outerProduct(xEstimateResidual));
    return x_hat;
}
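The mean update above is the exponentially weighted form mu := (1 - r) mu + r x_t, built from mapMultiply and add. Isolated as a minimal sketch with invented values:

import org.apache.commons.math3.linear.ArrayRealVector;
import org.apache.commons.math3.linear.RealVector;

public class EwmaDemo {
    public static void main(String[] args) {
        double r = 0.1; // forgetting factor, invented for the demo
        RealVector mu = new ArrayRealVector(new double[] {0.0, 0.0});
        RealVector x  = new ArrayRealVector(new double[] {1.0, 2.0});
        // mu := (1 - r) * mu + r * x, the same update as in SDAR2D above
        mu = mu.mapMultiply(1.0 - r).add(x.mapMultiply(r));
        System.out.println(mu); // {0.1; 0.2}
    }
}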
Example 5
Source File: KDTree.java From macrobase with Apache License 2.0
/**
 * Build a KD-Tree that makes the splits based on the midpoint of the widest dimension.
 * This is the approach described in [Gray, Moore 2003] based on [Deng, Moore 1995].
 * @param data
 * @param leafCapacity
 */
public KDTree(List<Datum> data, int leafCapacity) {
    this.leafCapacity = leafCapacity;
    this.k = data.get(0).metrics().getDimension();
    this.boundaries = new double[k][2];
    boundaries = AlgebraUtils.getBoundingBox(data);

    if (data.size() > this.leafCapacity) {
        double[] differences = new double[this.k];
        for (int i = 0; i < k; i++) {
            differences[i] = this.boundaries[i][1] - this.boundaries[i][0];
        }

        int widestDimension = 0;
        double maxWidth = -1;
        for (int i = 0; i < k; i++) {
            if (differences[i] > maxWidth) {
                maxWidth = differences[i];
                widestDimension = i;
            }
        }

        this.splitDimension = widestDimension;

        // XXX: This is the slow part!!!
        Collections.sort(data, new DatumComparator(splitDimension));

        int splitIndex = data.size() / 2;
        Datum belowSplit = data.get(splitIndex - 1);
        Datum aboveSplit = data.get(splitIndex);
        this.splitValue = 0.5 * (aboveSplit.metrics().getEntry(splitDimension)
                + belowSplit.metrics().getEntry(splitDimension));

        this.loChild = new KDTree(data.subList(0, splitIndex), leafCapacity);
        this.hiChild = new KDTree(data.subList(splitIndex, data.size()), leafCapacity);
        this.nBelow = data.size();

        this.mean = (loChild.mean.mapMultiply(loChild.nBelow)
                .add(hiChild.mean.mapMultiply(hiChild.nBelow))
                .mapDivide(loChild.nBelow + hiChild.nBelow));
    } else {
        this.items = data;
        this.nBelow = data.size();

        RealMatrix ret = new Array2DRowRealMatrix(data.size(), this.k);
        RealVector sum = new ArrayRealVector(this.k);
        int index = 0;
        for (Datum d : data) {
            ret.setRow(index, d.metrics().toArray());
            sum = sum.add(d.metrics());
            index += 1;
        }
        this.mean = sum.mapDivide(this.nBelow);
    }
}
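The weighted merge of the two child means can also be expressed with RealVector#combine(a, b, y), which computes a * this + b * y in a single call. A sketch with invented leaf counts:

import org.apache.commons.math3.linear.ArrayRealVector;
import org.apache.commons.math3.linear.RealVector;

public class CombineDemo {
    public static void main(String[] args) {
        int nLo = 3, nHi = 1; // invented leaf counts
        RealVector loMean = new ArrayRealVector(new double[] {1.0, 1.0});
        RealVector hiMean = new ArrayRealVector(new double[] {5.0, 9.0});
        // combine(a, b, y) returns a * this + b * y in one pass, equivalent
        // to the mapMultiply(...).add(...) chain in the constructor above.
        RealVector mean = loMean.combine(nLo, nHi, hiMean).mapDivide(nLo + nHi);
        System.out.println(mean); // {2; 3}
    }
}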
Example 6
Source File: GaussNewtonOptimizer.java From astor with GNU General Public License v2.0
/** {@inheritDoc} */
public Optimum optimize(final LeastSquaresProblem lsp) {
    // create local evaluation and iteration counts
    final Incrementor evaluationCounter = lsp.getEvaluationCounter();
    final Incrementor iterationCounter = lsp.getIterationCounter();
    final ConvergenceChecker<Evaluation> checker = lsp.getConvergenceChecker();

    // Computation will be useless without a checker (see "for-loop").
    if (checker == null) {
        throw new NullArgumentException();
    }

    RealVector currentPoint = lsp.getStart();

    // iterate until convergence is reached
    Evaluation current = null;
    while (true) {
        iterationCounter.incrementCount();

        // evaluate the objective function and its jacobian
        Evaluation previous = current;
        // Value of the objective function at "currentPoint".
        evaluationCounter.incrementCount();
        current = lsp.evaluate(currentPoint);
        final RealVector currentResiduals = current.getResiduals();
        final RealMatrix weightedJacobian = current.getJacobian();
        currentPoint = current.getPoint();

        // Check convergence.
        if (previous != null) {
            if (checker.converged(iterationCounter.getCount(), previous, current)) {
                return new OptimumImpl(
                        current, evaluationCounter.getCount(), iterationCounter.getCount());
            }
        }

        // solve the linearized least squares problem
        final RealVector dX = this.decomposition.solve(weightedJacobian, currentResiduals);
        // update the estimated parameters
        currentPoint = currentPoint.add(dX);
    }
}
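The final add(dX) is the Gauss-Newton parameter update x := x + dX. A minimal, self-contained sketch of the mechanics of one such step, with an invented Jacobian and residual vector and a QR solve standing in for the optimizer's pluggable decomposition:

import org.apache.commons.math3.linear.ArrayRealVector;
import org.apache.commons.math3.linear.MatrixUtils;
import org.apache.commons.math3.linear.QRDecomposition;
import org.apache.commons.math3.linear.RealMatrix;
import org.apache.commons.math3.linear.RealVector;

public class GaussNewtonStepDemo {
    public static void main(String[] args) {
        // Invented 2x2 Jacobian and residual vector for a single iteration.
        RealMatrix jacobian = MatrixUtils.createRealMatrix(new double[][] {
                {1.0, 0.0},
                {0.0, 2.0}});
        RealVector residuals = new ArrayRealVector(new double[] {0.5, -1.0});
        RealVector currentPoint = new ArrayRealVector(new double[] {1.0, 1.0});

        // Solve J * dX = r for the step, then update the point with add().
        RealVector dX = new QRDecomposition(jacobian).getSolver().solve(residuals);
        currentPoint = currentPoint.add(dX);
        System.out.println(currentPoint); // {1.5; 0.5}
    }
}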