Java Code Examples for org.nd4j.linalg.api.ndarray.INDArray#negi()
The following examples show how to use org.nd4j.linalg.api.ndarray.INDArray#negi(). Each example is taken from an open-source project; the source file and project are noted above each example.
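Before the project examples, a minimal standalone sketch of the method's semantics may help: negi() negates every element in place and returns the same INDArray, whereas neg() returns a negated copy. (The class name and array values here are illustrative, not from any of the projects below.)

import org.nd4j.linalg.api.ndarray.INDArray;
import org.nd4j.linalg.factory.Nd4j;

public class NegiDemo {
    public static void main(String[] args) {
        INDArray a = Nd4j.create(new double[] {1.0, -2.0, 3.0});

        INDArray b = a.neg();    // copy-negation: a is left unchanged
        INDArray c = a.negi();   // in-place negation: a itself is modified

        System.out.println(b);       // [-1.0, 2.0, -3.0]
        System.out.println(a);       // [-1.0, 2.0, -3.0] -- mutated by negi()
        System.out.println(c == a);  // true: negi() returns the receiver
    }
}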
Example 1
Source File: BernoulliReconstructionDistribution.java From deeplearning4j with Apache License 2.0
@Override
public INDArray gradient(INDArray x, INDArray preOutDistributionParams) {
    INDArray output = preOutDistributionParams.dup();
    activationFn.getActivation(output, true);

    x = x.castTo(preOutDistributionParams.dataType());
    INDArray diff = x.sub(output);
    INDArray outOneMinusOut = output.rsub(1.0).muli(output);

    INDArray grad = diff.divi(outOneMinusOut);
    grad = activationFn.backprop(preOutDistributionParams.dup(), grad).getFirst();

    //Issue: if output == 0 or output == 1, then (assuming sigmoid output or similar)
    //sigmaPrime == 0, sigmaPrime * (x-out) / (out*(1-out)) == 0 * (x-out) / 0 -> 0/0 -> NaN. But taking limit, we want
    //0*(x-out)/0 == 0 -> implies 0 gradient at the far extremes (0 or 1) of the output
    BooleanIndexing.replaceWhere(grad, 0.0, Conditions.isNan());
    return grad.negi();
}
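The replaceWhere call is the Nd4j idiom for the 0/0 -> NaN edge case described in the comment above. A minimal standalone sketch of that sanitize-then-negate pattern (class name and array contents illustrative):

import org.nd4j.linalg.api.ndarray.INDArray;
import org.nd4j.linalg.factory.Nd4j;
import org.nd4j.linalg.indexing.BooleanIndexing;
import org.nd4j.linalg.indexing.conditions.Conditions;

public class NanReplaceDemo {
    public static void main(String[] args) {
        INDArray grad = Nd4j.create(new double[] {0.5, Double.NaN, -1.5});

        // Replace every NaN element with 0.0, in place
        BooleanIndexing.replaceWhere(grad, 0.0, Conditions.isNan());

        // grad is now [0.5, 0.0, -1.5], so the in-place negation cannot propagate NaN
        System.out.println(grad.negi());
    }
}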
Example 2
Source File: LossFMeasure.java From nd4j with Apache License 2.0
@Override
public INDArray computeGradient(INDArray labels, INDArray preOutput, IActivation activationFn, INDArray mask) {
    double[] d = computeScoreNumDenom(labels, preOutput, activationFn, mask, false);
    double numerator = d[0];
    double denominator = d[1];

    if (numerator == 0.0 && denominator == 0.0) {
        //Zero score -> zero gradient
        return Nd4j.create(preOutput.shape());
    }

    double secondTerm = numerator / (denominator * denominator);

    INDArray dLdOut;
    if (labels.size(1) == 1) {
        //Single binary output case
        dLdOut = labels.mul(1 + beta * beta).divi(denominator).subi(secondTerm);
    } else {
        //Softmax case: the getColumn(1) here is to account for the fact that we're using prob(class1)
        // only in the score function; column(1) is equivalent to output for the single output case
        dLdOut = Nd4j.create(labels.shape());
        dLdOut.getColumn(1).assign(labels.getColumn(1).mul(1 + beta * beta).divi(denominator).subi(secondTerm));
    }

    //Negate relative to description in paper, as we want to *minimize* 1.0-fMeasure, which is equivalent to
    // maximizing fMeasure
    dLdOut.negi();

    INDArray dLdPreOut = activationFn.backprop(preOutput, dLdOut).getFirst();

    if (mask != null) {
        dLdPreOut.muliColumnVector(mask);
    }

    return dLdPreOut;
}
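When a mask is supplied, the last step zeroes out the gradient rows of masked-out examples with an in-place column-vector multiply. A minimal standalone sketch of that masking step (class name, shapes, and values illustrative):

import org.nd4j.linalg.api.ndarray.INDArray;
import org.nd4j.linalg.factory.Nd4j;

public class MaskDemo {
    public static void main(String[] args) {
        // Gradient for 4 examples x 2 outputs
        INDArray dLdPreOut = Nd4j.ones(4, 2);

        // One mask entry per example: 1 = keep, 0 = zero out that row
        INDArray mask = Nd4j.create(new double[] {1, 0, 1, 1}, new int[] {4, 1});

        // In-place broadcast multiply down the rows, as in computeGradient(...)
        dLdPreOut.muliColumnVector(mask);
        System.out.println(dLdPreOut);  // second row is now all zeros
    }
}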
Example 3
Source File: LossFMeasure.java From deeplearning4j with Apache License 2.0
@Override
public INDArray computeGradient(INDArray labels, INDArray preOutput, IActivation activationFn, INDArray mask) {
    labels = labels.castTo(preOutput.dataType());   //No-op if already correct dtype
    double[] d = computeScoreNumDenom(labels, preOutput, activationFn, mask, false);
    double numerator = d[0];
    double denominator = d[1];

    if (numerator == 0.0 && denominator == 0.0) {
        //Zero score -> zero gradient
        return Nd4j.create(preOutput.shape());
    }

    double secondTerm = numerator / (denominator * denominator);

    INDArray dLdOut;
    if (labels.size(1) == 1) {
        //Single binary output case
        dLdOut = labels.mul(1 + beta * beta).divi(denominator).subi(secondTerm);
    } else {
        //Softmax case: the getColumn(1) here is to account for the fact that we're using prob(class1)
        // only in the score function; column(1) is equivalent to output for the single output case
        dLdOut = Nd4j.create(labels.shape());
        dLdOut.getColumn(1).assign(labels.getColumn(1).mul(1 + beta * beta).divi(denominator).subi(secondTerm));
    }

    //Negate relative to description in paper, as we want to *minimize* 1.0-fMeasure, which is equivalent to
    // maximizing fMeasure
    dLdOut.negi();

    INDArray dLdPreOut = activationFn.backprop(preOutput, dLdOut).getFirst();

    if (mask != null) {
        dLdPreOut.muliColumnVector(mask);
    }

    return dLdPreOut;
}
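This deeplearning4j version of LossFMeasure differs from the nd4j version in Example 2 only by the castTo call at the top, which (as its comment notes) is a no-op when the labels already match the pre-output data type and otherwise avoids mixed-dtype arithmetic in the steps that follow.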
Example 4
Source File: GaussianReconstructionDistribution.java From deeplearning4j with Apache License 2.0
@Override
public INDArray gradient(INDArray x, INDArray preOutDistributionParams) {
    INDArray output = preOutDistributionParams.dup();
    activationFn.getActivation(output, true);

    val size = output.size(1) / 2;
    INDArray mean = output.get(NDArrayIndex.all(), NDArrayIndex.interval(0, size));
    INDArray logStdevSquared = output.get(NDArrayIndex.all(), NDArrayIndex.interval(size, 2 * size));

    INDArray sigmaSquared = Transforms.exp(logStdevSquared, true).castTo(x.dataType());

    INDArray xSubMean = x.sub(mean.castTo(x.dataType()));
    INDArray xSubMeanSq = xSubMean.mul(xSubMean);

    INDArray dLdmu = xSubMean.divi(sigmaSquared);

    INDArray sigma = Transforms.sqrt(sigmaSquared, true);
    INDArray sigma3 = Transforms.pow(sigmaSquared, 3.0 / 2);

    INDArray dLdsigma = sigma.rdiv(-1).addi(xSubMeanSq.divi(sigma3));
    INDArray dLdlogSigma2 = sigma.divi(2).muli(dLdsigma);

    INDArray dLdx = Nd4j.createUninitialized(preOutDistributionParams.dataType(), output.shape());
    dLdx.put(new INDArrayIndex[] {NDArrayIndex.all(), NDArrayIndex.interval(0, size)}, dLdmu);
    dLdx.put(new INDArrayIndex[] {NDArrayIndex.all(), NDArrayIndex.interval(size, 2 * size)}, dLdlogSigma2);
    dLdx.negi();

    //dL/dz
    return activationFn.backprop(preOutDistributionParams.dup(), dLdx).getFirst();
}
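The mean / log-variance split above uses Nd4j interval indexing to view and write the two halves of each row. A minimal standalone sketch of that get/put pattern followed by the in-place negation (class name and shapes illustrative):

import org.nd4j.linalg.api.ndarray.INDArray;
import org.nd4j.linalg.factory.Nd4j;
import org.nd4j.linalg.indexing.INDArrayIndex;
import org.nd4j.linalg.indexing.NDArrayIndex;

public class IntervalDemo {
    public static void main(String[] args) {
        INDArray output = Nd4j.rand(3, 4);  // 3 examples, 2 + 2 distribution parameters
        long size = output.size(1) / 2;

        // Views over the left (mean) and right (log-variance) halves of each row
        INDArray left = output.get(NDArrayIndex.all(), NDArrayIndex.interval(0, size));
        INDArray right = output.get(NDArrayIndex.all(), NDArrayIndex.interval(size, 2 * size));

        // Reassemble into a fresh array, then negate in place, as gradient(...) does
        INDArray dLdx = Nd4j.createUninitialized(output.dataType(), output.shape());
        dLdx.put(new INDArrayIndex[] {NDArrayIndex.all(), NDArrayIndex.interval(0, size)}, left);
        dLdx.put(new INDArrayIndex[] {NDArrayIndex.all(), NDArrayIndex.interval(size, 2 * size)}, right);
        System.out.println(dLdx.negi());
    }
}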
Example 5
Source File: VPTree.java From deeplearning4j with Apache License 2.0
/**
 * Computes the distance (or similarity) from each row of items to the base point,
 * writing the per-row results into distancesArr.
 *
 * @param items        matrix of points, one per row
 * @param basePoint    the point to measure against
 * @param distancesArr output array for the per-row results
 */
public void calcDistancesRelativeTo(INDArray items, INDArray basePoint, INDArray distancesArr) {
    switch (similarityFunction) {
        case "euclidean":
            Nd4j.getExecutioner().exec(new EuclideanDistance(items, basePoint, distancesArr, true, -1));
            break;
        case "cosinedistance":
            Nd4j.getExecutioner().exec(new CosineDistance(items, basePoint, distancesArr, true, -1));
            break;
        case "cosinesimilarity":
            Nd4j.getExecutioner().exec(new CosineSimilarity(items, basePoint, distancesArr, true, -1));
            break;
        case "manhattan":
            Nd4j.getExecutioner().exec(new ManhattanDistance(items, basePoint, distancesArr, true, -1));
            break;
        case "dot":
            Nd4j.getExecutioner().exec(new Dot(items, basePoint, distancesArr, -1));
            break;
        case "jaccard":
            Nd4j.getExecutioner().exec(new JaccardDistance(items, basePoint, distancesArr, true, -1));
            break;
        case "hamming":
            Nd4j.getExecutioner().exec(new HammingDistance(items, basePoint, distancesArr, true, -1));
            break;
        default:
            Nd4j.getExecutioner().exec(new EuclideanDistance(items, basePoint, distancesArr, true, -1));
            break;
    }

    if (invert)
        distancesArr.negi();
}
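The invert flag covers similarity functions such as "cosinesimilarity" and "dot", where larger values mean closer points; the final negi() flips the sign so the results sort like distances. A minimal standalone sketch of that inversion step (the class name, shapes, and data are illustrative, and the CosineSimilarity import path assumes a recent ND4J version):

import org.nd4j.linalg.api.ndarray.INDArray;
import org.nd4j.linalg.api.ops.impl.reduce3.CosineSimilarity;
import org.nd4j.linalg.factory.Nd4j;

public class InvertDemo {
    public static void main(String[] args) {
        INDArray items = Nd4j.rand(5, 3);      // 5 candidate points
        INDArray basePoint = Nd4j.rand(1, 3);  // the query point
        INDArray result = Nd4j.create(items.rows(), 1);  // one value per row of items

        // Cosine similarity of each row against the base point, as in the method above
        Nd4j.getExecutioner().exec(new CosineSimilarity(items, basePoint, result, true, -1));

        // Larger similarity = closer, so negate in place to get a distance-like ordering
        System.out.println(result.negi());
    }
}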