Java Code Examples for org.apache.commons.math3.linear.RealMatrix#subtract()
The following examples show how to use
org.apache.commons.math3.linear.RealMatrix#subtract() .
You can vote up the examples you like or vote down the ones you don't like,
and you can go to the original project or source file by following the links above each example. You can also check out the related API usage on the sidebar.
Example 1
Source File: NormalizeSomaticReadCountsIntegrationTest.java From gatk-protected with BSD 3-Clause "New" or "Revised" License | 6 votes |
/**
 * Asserts that {@code actualReadCounts} equals the tangent normalization computed directly
 * from the pre-tangent-normalized counts: expected = input - (reduced PoN panel x beta-hats),
 * with both collections re-ordered into the PoN's target order before comparison.
 */
private void assertTangentNormalized(final ReadCountCollection actualReadCounts, final ReadCountCollection preTangentNormalized, final RealMatrix betaHats, final File ponFile) {
    try (final HDF5File ponReader = new HDF5File(ponFile)) {
        final PCACoveragePoN pon = new HDF5PCACoveragePoN(ponReader);
        // Line rows up with the PoN target order so row-by-row comparison is valid.
        final RealMatrix inCounts = reorderTargetsToPoNOrder(preTangentNormalized, pon.getPanelTargetNames());
        final RealMatrix actual = reorderTargetsToPoNOrder(actualReadCounts, pon.getPanelTargetNames());
        // expected = input - (reduced panel x beta-hats)
        final RealMatrix expected = inCounts.subtract(pon.getReducedPanelCounts().multiply(betaHats));
        Assert.assertEquals(actual.getRowDimension(), expected.getRowDimension());
        Assert.assertEquals(actual.getColumnDimension(), expected.getColumnDimension());
        for (int row = 0; row < actual.getRowDimension(); row++) {
            Assert.assertEquals(actual.getRow(row), expected.getRow(row));
        }
    }
}
Example 2
Source File: SVDDenoisingUtils.java From gatk with BSD 3-Clause "New" or "Revised" License | 6 votes |
/**
 * Given standardized read counts specified by a row vector S (dimensions {@code 1 x M})
 * and all eigensample vectors U (dimensions {@code M x K}),
 * returns s - s U<sub>k</sub> U<sub>k</sub><sup>T</sup>,
 * where U<sub>k</sub> contains the first {@code numEigensamples}.
 */
private static RealMatrix subtractProjection(final RealMatrix standardizedValues,
                                             final double[][] eigensampleVectors,
                                             final int numEigensamples) {
    // Nothing to project out: return a defensive copy of the input.
    if (numEigensamples == 0) {
        return standardizedValues.copy();
    }
    final int numIntervals = eigensampleVectors.length;
    final int numAllEigensamples = eigensampleVectors[0].length;

    logger.info("Distributing the standardized read counts...");

    logger.info("Composing eigensample matrix for the requested number of eigensamples and transposing them...");
    // Wrap without copying; truncate columns only when fewer than all eigensamples are requested.
    final RealMatrix allEigensamples = new Array2DRowRealMatrix(eigensampleVectors, false);
    final RealMatrix eigensampleTruncatedMatrix;
    if (numEigensamples == numAllEigensamples) {
        eigensampleTruncatedMatrix = allEigensamples;
    } else {
        eigensampleTruncatedMatrix = allEigensamples.getSubMatrix(0, numIntervals - 1, 0, numEigensamples - 1);
    }

    logger.info("Computing projection...");
    // s U_k U_k^T
    final RealMatrix projection = standardizedValues
            .multiply(eigensampleTruncatedMatrix)
            .multiply(eigensampleTruncatedMatrix.transpose());

    logger.info("Subtracting projection...");
    return standardizedValues.subtract(projection);
}
Example 3
Source File: CheckUtil.java From nd4j with Apache License 2.0 | 6 votes |
/** Same as checkMmul, but for matrix subtraction */
public static boolean checkSubtract(INDArray first, INDArray second, double maxRelativeDifference, double minAbsDifference) {
    // Reference result via Apache Commons Math.
    final RealMatrix apacheFirst = convertToApacheMatrix(first);
    final RealMatrix apacheSecond = convertToApacheMatrix(second);
    final RealMatrix expected = apacheFirst.subtract(apacheSecond);
    // Result under test via ND4J.
    final INDArray actual = first.sub(second);
    if (!checkShape(expected, actual)) {
        return false;
    }
    final boolean entriesMatch = checkEntries(expected, actual, maxRelativeDifference, minAbsDifference);
    if (!entriesMatch) {
        // Re-run on offset-zero copies to help diagnose view/offset-related failures.
        final INDArray onCopies = Shape.toOffsetZeroCopy(first).sub(Shape.toOffsetZeroCopy(second));
        printFailureDetails(first, second, expected, actual, onCopies, "sub");
    }
    return entriesMatch;
}
Example 4
Source File: CheckUtil.java From deeplearning4j with Apache License 2.0 | 6 votes |
/** Same as checkMmul, but for matrix subtraction */
public static boolean checkSubtract(INDArray first, INDArray second, double maxRelativeDifference, double minAbsDifference) {
    // Expected result computed with Apache Commons Math as the reference implementation.
    final RealMatrix reference = convertToApacheMatrix(first).subtract(convertToApacheMatrix(second));
    // Actual result computed with ND4J.
    final INDArray result = first.sub(second);
    if (!checkShape(reference, result)) {
        return false;
    }
    final boolean ok = checkEntries(reference, result, maxRelativeDifference, minAbsDifference);
    if (!ok) {
        // Repeat on contiguous (offset-zero) copies so failure output distinguishes view bugs.
        final INDArray onCopies = Shape.toOffsetZeroCopy(first).sub(Shape.toOffsetZeroCopy(second));
        printFailureDetails(first, second, reference, result, onCopies, "sub");
    }
    return ok;
}
Example 5
Source File: Math.java From icure-backend with GNU General Public License v2.0 | 5 votes |
/**
 * Fits a polynomial of degree {@code pow} to the points {@code (x[i], y[i])} using the
 * Recursive Least Squares (RLS) algorithm and returns its coefficients.
 *
 * @param x   abscissae of the sample points
 * @param y   ordinates of the sample points (same length as {@code x})
 * @param pow degree of the fitted polynomial; must be at least 1
 * @return coefficients {@code [c0, c1, ..., cpow]} of {@code c0 + c1*x + ... + cpow*x^pow},
 *         or {@code null} when {@code pow < 1} (kept for backward compatibility)
 */
public static double[] rlsInterpolation(double[] x, double[] y, int pow) {
    if (pow < 1) {
        return null;
    }
    final int n = pow + 1;
    // Large initial covariance expresses low confidence in the zero-initialized weights.
    final double initialCovariance = 1000d;
    // Java arrays are zero-initialized, so the weight vector starts at 0 with no explicit
    // loop, and only the diagonal of P needs to be set (the original zeroed both redundantly).
    final double[][] pMtx = new double[n][n];
    for (int i = 0; i < n; i++) {
        pMtx[i][i] = initialCovariance;
    }
    RealMatrix wV = new Array2DRowRealMatrix(new double[n]); // weight (coefficient) vector
    RealMatrix pM = new Array2DRowRealMatrix(pMtx);          // inverse-correlation matrix P
    // NOTE(review): assumes x.length == y.length — confirm with callers.
    for (int k = 0; k < x.length; k++) {
        final double xx = x[k];
        final double yy = y[k];
        // Regressor column vector: [1, xx, xx^2, ..., xx^pow]^T.
        final RealMatrix xV = new Array2DRowRealMatrix(n, 1);
        double aPow = 1;
        for (int i = 0; i < n; i++) {
            xV.setEntry(i, 0, aPow);
            aPow *= xx;
        }
        // A-priori prediction error for this sample.
        final double alpha = yy - wV.transpose().multiply(xV).getEntry(0, 0);
        // Gain vector g = P x / (1 + x^T P x).
        final RealMatrix gV = pM.multiply(xV)
                .scalarMultiply(1 / (1d + xV.transpose().multiply(pM).multiply(xV).getEntry(0, 0)));
        // Standard RLS covariance and weight updates.
        pM = pM.subtract(gV.multiply(xV.transpose()).multiply(pM));
        wV = wV.add(gV.scalarMultiply(alpha));
    }
    return wV.getColumn(0);
}
Example 6
Source File: PCATangentNormalizationUtils.java From gatk-protected with BSD 3-Clause "New" or "Revised" License | 5 votes |
/**
 * Tangent normalize given the raw PoN data using Spark: the code here is a little more complex for optimization purposes.
 *
 * Please see notes in docs/PoN ...
 *
 * Ahat^T = (C^T P^T) A^T
 * Therefore, C^T is the RowMatrix
 *
 * pinv: P
 * panel: A
 * projection: Ahat
 * cases: C
 * betahat: C^T P^T
 * tangentNormalizedCounts: C - Ahat
 */
private static PCATangentNormalizationResult tangentNormalizeSpark(final ReadCountCollection targetFactorNormalizedCounts,
                                                                   final RealMatrix reducedPanelCounts,
                                                                   final RealMatrix reducedPanelPInvCounts,
                                                                   final CaseToPoNTargetMapper targetMapper,
                                                                   final RealMatrix tangentNormalizationInputCounts,
                                                                   final JavaSparkContext ctx) {
    // Make the C^T a distributed matrix (RowMatrix). The case counts are transposed so that
    // Spark distributes by row.
    final RowMatrix caseTDistMat = SparkConverter.convertRealMatrixToSparkRowMatrix(
            ctx, tangentNormalizationInputCounts.transpose(), TN_NUM_SLICES_SPARK);

    // Spark local matrices (transposed): build dense local copies of P^T and A^T.
    // Doubles.concat flattens the 2-D data row-major; the 'true' flag marks the layout,
    // and .transpose() yields the transposed local matrix without copying again.
    final Matrix pinvTLocalMat = new DenseMatrix(
            reducedPanelPInvCounts.getRowDimension(), reducedPanelPInvCounts.getColumnDimension(),
            Doubles.concat(reducedPanelPInvCounts.getData()), true).transpose();
    final Matrix panelTLocalMat = new DenseMatrix(
            reducedPanelCounts.getRowDimension(), reducedPanelCounts.getColumnDimension(),
            Doubles.concat(reducedPanelCounts.getData()), true).transpose();

    // Calculate the projection transpose in a distributed matrix, then convert to Apache Commons
    // matrix (not transposed): betahat = C^T P^T, projection^T = betahat A^T.
    final RowMatrix betahatDistMat = caseTDistMat.multiply(pinvTLocalMat);
    final RowMatrix projectionTDistMat = betahatDistMat.multiply(panelTLocalMat);
    final RealMatrix projection = SparkConverter.convertSparkRowMatrixToRealMatrix(
            projectionTDistMat, tangentNormalizationInputCounts.transpose().getRowDimension()).transpose();

    // Subtract the projection from the cases: C - Ahat.
    final RealMatrix tangentNormalizedCounts = tangentNormalizationInputCounts.subtract(projection);

    // Construct the result object and return it with the correct targets.
    final ReadCountCollection tangentNormalized = targetMapper.fromPoNtoCaseCountCollection(
            tangentNormalizedCounts, targetFactorNormalizedCounts.columnNames());
    final ReadCountCollection preTangentNormalized = targetMapper.fromPoNtoCaseCountCollection(
            tangentNormalizationInputCounts, targetFactorNormalizedCounts.columnNames());
    // Pull the beta-hats back from Spark; transposed on return so columns correspond to cases.
    final RealMatrix tangentBetaHats = SparkConverter.convertSparkRowMatrixToRealMatrix(
            betahatDistMat, tangentNormalizedCounts.getColumnDimension());
    return new PCATangentNormalizationResult(tangentNormalized, preTangentNormalized, tangentBetaHats.transpose(), targetFactorNormalizedCounts);
}
Example 7
Source File: MinCovDetTest.java From macrobase with Apache License 2.0 | 5 votes |
private static double getMahalanobisApache(RealVector mean, RealMatrix inverseCov, RealVector vec) { // sqrt((vec-mean)^T S^-1 (vec-mean)) RealMatrix vecT = new Array2DRowRealMatrix(vec.toArray()); RealMatrix meanT = new Array2DRowRealMatrix(mean.toArray()); RealMatrix vecSubtractMean = vecT.subtract(meanT); return Math.sqrt(vecSubtractMean.transpose() .multiply(inverseCov) .multiply(vecSubtractMean).getEntry(0, 0)); }
Example 8
Source File: PCATangentNormalizationUtils.java From gatk-protected with BSD 3-Clause "New" or "Revised" License | 3 votes |
/**
 * Applies tangent normalization.
 * <p>
 * The input row order should match the panel's target order.
 * </p>
 *
 * @param normals the log-normalized or reduced-panel counts from a panel of normals
 * @param input the input counts to normalize. This matrix is TxS where T is the number of targets
 *              and S the number of count columns.
 * @param betaHats the beta-hats for the projection to use for the normalization. This matrix
 *                 is NxS where N is the number of samples in the panel of choice and S is the number of count columns.
 * @return never {@code null}.
 */
private static RealMatrix tangentNormalize(final RealMatrix normals, final RealMatrix input, final RealMatrix betaHats) {
    Utils.validateArg(input.getColumnDimension() == betaHats.getColumnDimension(),
            String.format("the input count column count (%d) does not match the number of columns in the beta-hats (%d)",
                    input.getColumnDimension(), betaHats.getColumnDimension()));
    // BUGFIX: the message previously reported normals.getRowDimension() as the beta-hats
    // component count, even though the condition tests betaHats.getRowDimension().
    Utils.validateArg(normals.getColumnDimension() == betaHats.getRowDimension(),
            String.format("beta-hats component count (%d) does not match the number of samples in the PoN (%d)",
                    betaHats.getRowDimension(), normals.getColumnDimension()));
    // Project the input onto the panel and subtract: input - normals * betaHats.
    final RealMatrix projection = normals.multiply(betaHats);
    return input.subtract(projection);
}