Java Code Examples for org.nd4j.linalg.api.ndarray.INDArray#rdivi()
The following examples show how to use org.nd4j.linalg.api.ndarray.INDArray#rdivi().
You can go to the original project or source file by following the links above each example.
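Before the examples, a note on semantics: rdivi is the in-place "reverse divide". Where a.divi(b) overwrites a with a / b, a.rdivi(b) overwrites a with b / a, elementwise; b may be a scalar or another INDArray. A minimal sketch of this behavior (assuming only an Nd4j backend on the classpath; the class name and values are illustrative, not taken from any of the projects below):

import org.nd4j.linalg.api.ndarray.INDArray;
import org.nd4j.linalg.factory.Nd4j;

public class RdiviSketch {
    public static void main(String[] args) {
        INDArray a = Nd4j.create(new float[]{1f, 2f, 4f});

        //In place: a becomes 2 / a, elementwise
        INDArray result = a.rdivi(2);

        System.out.println(result);      //[2.0, 1.0, 0.5]
        System.out.println(result == a); //true: in-place ops return the mutated array itself
    }
}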
Example 1
Source File: CudaScalarsTests.java From nd4j with Apache License 2.0
@Test
public void testPinnedScalarRDiv() throws Exception {
    // simple way to stop test if we're not on CUDA backend here
    assertEquals("JcublasLevel1", Nd4j.getBlasWrapper().level1().getClass().getSimpleName());

    INDArray array1 = Nd4j.create(new float[]{1.01f, 1.01f, 1.01f, 1.01f, 1.01f, 1.01f, 1.01f, 1.01f, 1.01f, 1.01f, 1.01f, 1.01f, 1.01f, 1.01f, 1.01f});
    INDArray array2 = Nd4j.create(new float[]{2.00f, 1.00f, 1.00f, 1.00f, 1.00f, 1.00f, 1.00f, 1.00f, 1.00f, 1.00f, 1.00f, 1.00f, 1.00f, 1.00f, 1.00f});

    array2.rdivi(0.5f); //in place: array2 = 0.5f / array2, so the first element 2.0f -> 0.25f

    System.out.println("RDiv result: " + array2.getFloat(0));
    assertEquals(0.25f, array2.getFloat(0), 0.01f);
}
Example 2
Source File: NDArrayTestsFortran.java From nd4j with Apache License 2.0
@Test
public void testRDivi() {
    INDArray n2 = Nd4j.valueArrayOf(new long[] {1, 2}, 4);
    INDArray n2Assertion = Nd4j.valueArrayOf(new long[] {1, 2}, 0.5);
    INDArray nRsubi = n2.rdivi(2); //in place: n2 = 2 / n2 = 0.5
    assertEquals(n2Assertion, nRsubi);
}
Example 3
Source File: AdaGrad.java From nd4j with Apache License 2.0
public INDArray getGradient(INDArray gradient, int slice, int[] shape) {
    boolean historicalInitialized = false;
    INDArray sqrtHistory;

    if (this.historicalGradient == null) {
        this.historicalGradient = Nd4j.zeros(shape).add(epsilon);
        historicalInitialized = true;
    } else if (!this.historicalGradient.isVector()
                    && this.historicalGradient.slice(slice).length() != gradient.length())
        throw new IllegalArgumentException("Illegal gradient");

    if (historicalGradient.isVector())
        sqrtHistory = sqrt(historicalGradient);
    else
        sqrtHistory = !historicalInitialized ? sqrt(historicalGradient.slice(slice)) : historicalGradient;

    INDArray learningRates;
    try {
        learningRates = sqrtHistory.rdivi(learningRate); //in place: learningRate / sqrt(history), elementwise
    } catch (ArithmeticException ae) {
        learningRates = sqrtHistory.rdivi(learningRate + epsilon);
    }

    if (gradient.length() != learningRates.length())
        gradient.muli(learningRates.slice(slice));
    else
        gradient.muli(learningRates);

    this.historicalGradient.slice(slice).addi(gradient.mul(gradient));
    numIterations++;

    //ensure no zeros
    return gradient;
}
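The rdivi call is what turns the accumulated squared-gradient history into per-parameter learning rates: sqrtHistory.rdivi(learningRate) computes learningRate / sqrt(history) elementwise, in place. A minimal sketch of just that step (assuming Nd4j; the variable names and values are illustrative):

import org.nd4j.linalg.api.ndarray.INDArray;
import org.nd4j.linalg.factory.Nd4j;
import org.nd4j.linalg.ops.transforms.Transforms;

public class AdaGradRateSketch {
    public static void main(String[] args) {
        double learningRate = 0.1;
        //Accumulated sum of squared gradients (toy values)
        INDArray history = Nd4j.create(new double[]{1.0, 4.0, 25.0});

        //Per-parameter rates: learningRate / sqrt(history), computed in place on the sqrt result
        INDArray rates = Transforms.sqrt(history, true).rdivi(learningRate);

        System.out.println(rates); //[0.1, 0.05, 0.02]
    }
}

Parameters with a large accumulated history get a small effective rate, which is the AdaGrad idea. Example 6 below is the deeplearning4j version of this same method; it differs only in taking a long[] shape.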
Example 4
Source File: NDArrayTestsFortran.java From deeplearning4j with Apache License 2.0
@Test
public void testRDivi() {
    INDArray n2 = Nd4j.valueArrayOf(new long[] {1, 2}, 4.0);
    INDArray n2Assertion = Nd4j.valueArrayOf(new long[] {1, 2}, 0.5);
    INDArray nRsubi = n2.rdivi(2); //in place: n2 = 2 / n2 = 0.5
    assertEquals(n2Assertion, nRsubi);
}
Example 5
Source File: InvertMatrix.java From deeplearning4j with Apache License 2.0
/**
 * Inverts a matrix
 * @param arr the array to invert
 * @param inPlace Whether to store the result in {@code arr}
 * @return the inverted matrix
 */
public static INDArray invert(INDArray arr, boolean inPlace) {
    if (arr.rank() == 2 && arr.length() == 1) {
        //[1,1] edge case. Matrix inversion: [x] * [1/x] = [1]
        if (inPlace) {
            return arr.rdivi(1.0); //reciprocal, in place
        } else {
            return arr.rdiv(1.0);  //reciprocal, as a new array
        }
    }
    if (!arr.isSquare()) {
        throw new IllegalArgumentException("invalid array: must be square matrix");
    }

    //FIX ME: Please
    /*
    int[] IPIV = new int[arr.length() + 1];
    int LWORK = arr.length() * arr.length();
    INDArray WORK = Nd4j.create(new double[LWORK]);
    INDArray inverse = inPlace ? arr : arr.dup();
    Nd4j.getBlasWrapper().lapack().getrf(arr);
    Nd4j.getBlasWrapper().lapack().getri(arr.size(0), inverse, arr.size(0), IPIV, WORK, LWORK, 0);
    */

    RealMatrix rm = CheckUtil.convertToApacheMatrix(arr);
    RealMatrix rmInverse = new LUDecomposition(rm).getSolver().getInverse();
    INDArray inverse = CheckUtil.convertFromApacheMatrix(rmInverse, arr.dataType());
    if (inPlace)
        arr.assign(inverse);
    return inverse;
}
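The [1,1] branch above relies on rdiv/rdivi with the scalar 1.0 being an elementwise reciprocal: for a single-element matrix [x], the inverse is simply [1/x]. A minimal sketch of the reciprocal idiom (assuming Nd4j; the class name and values are illustrative):

import org.nd4j.linalg.api.ndarray.INDArray;
import org.nd4j.linalg.factory.Nd4j;

public class ReciprocalSketch {
    public static void main(String[] args) {
        INDArray x = Nd4j.create(new double[]{0.5, 2.0, 4.0});

        INDArray copy = x.rdiv(1.0);     //new array: 1.0 / x -> [2.0, 0.5, 0.25]; x unchanged
        INDArray inPlace = x.rdivi(1.0); //overwrites x with 1.0 / x

        System.out.println(copy);
        System.out.println(inPlace == x); //true
    }
}

The same reciprocal idiom appears in Example 7, where the mean of an exponential distribution is computed as 1.0 / lambda via lambda.rdivi(1.0).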
Example 6
Source File: AdaGrad.java From deeplearning4j with Apache License 2.0
public INDArray getGradient(INDArray gradient, int slice, long[] shape) {
    boolean historicalInitialized = false;
    INDArray sqrtHistory;

    if (this.historicalGradient == null) {
        this.historicalGradient = Nd4j.zeros(shape).add(epsilon);
        historicalInitialized = true;
    } else if (!this.historicalGradient.isVector()
                    && this.historicalGradient.slice(slice).length() != gradient.length())
        throw new IllegalArgumentException("Illegal gradient");

    if (historicalGradient.isVector())
        sqrtHistory = sqrt(historicalGradient);
    else
        sqrtHistory = !historicalInitialized ? sqrt(historicalGradient.slice(slice)) : historicalGradient;

    INDArray learningRates;
    try {
        learningRates = sqrtHistory.rdivi(learningRate); //in place: learningRate / sqrt(history), elementwise
    } catch (ArithmeticException ae) {
        learningRates = sqrtHistory.rdivi(learningRate + epsilon);
    }

    if (gradient.length() != learningRates.length())
        gradient.muli(learningRates.slice(slice));
    else
        gradient.muli(learningRates);

    this.historicalGradient.slice(slice).addi(gradient.mul(gradient));
    numIterations++;

    //ensure no zeros
    return gradient;
}
Example 7
Source File: ExponentialReconstructionDistribution.java From deeplearning4j with Apache License 2.0
@Override
public INDArray generateAtMean(INDArray preOutDistributionParams) {
    //Input: gamma = log(lambda) -> lambda = exp(gamma)
    //Mean for exponential distribution: 1/lambda
    INDArray gamma = activationFn.getActivation(preOutDistributionParams.dup(), false);

    INDArray lambda = Transforms.exp(gamma, true);
    return lambda.rdivi(1.0); //mean = 1.0 / lambda
}
Example 8
Source File: NDArrayColumnsMathOpTransform.java From DataVec with Apache License 2.0
@Override
protected Writable doOp(Writable... input) {
    INDArray out = ((NDArrayWritable) input[0]).get().dup();

    switch (mathOp) {
        case Add:
            for (int i = 1; i < input.length; i++) {
                out.addi(((NDArrayWritable) input[i]).get());
            }
            break;
        case Subtract:
            out.subi(((NDArrayWritable) input[1]).get());
            break;
        case Multiply:
            for (int i = 1; i < input.length; i++) {
                out.muli(((NDArrayWritable) input[i]).get());
            }
            break;
        case Divide:
            out.divi(((NDArrayWritable) input[1]).get());
            break;
        case ReverseSubtract:
            out.rsubi(((NDArrayWritable) input[1]).get());
            break;
        case ReverseDivide:
            out.rdivi(((NDArrayWritable) input[1]).get());
            break;
        case Modulus:
        case ScalarMin:
        case ScalarMax:
            throw new IllegalArgumentException(
                            "Invalid MathOp: cannot use " + mathOp + " with NDArrayColumnsMathOpTransform");
        default:
            throw new RuntimeException("Unknown MathOp: " + mathOp);
    }

    //To avoid threading issues...
    Nd4j.getExecutioner().commit();

    return new NDArrayWritable(out);
}
Example 9
Source File: NDArrayScalarOpTransform.java From DataVec with Apache License 2.0
@Override
public NDArrayWritable map(Writable w) {
    if (!(w instanceof NDArrayWritable)) {
        throw new IllegalArgumentException("Input writable is not an NDArrayWritable: is " + w.getClass());
    }

    //Make a copy - can't always assume that the original INDArray won't be used again in the future
    NDArrayWritable n = ((NDArrayWritable) w);
    INDArray a = n.get().dup();
    switch (mathOp) {
        case Add:
            a.addi(scalar);
            break;
        case Subtract:
            a.subi(scalar);
            break;
        case Multiply:
            a.muli(scalar);
            break;
        case Divide:
            a.divi(scalar);
            break;
        case Modulus:
            throw new UnsupportedOperationException(mathOp + " is not supported for NDArrayWritable");
        case ReverseSubtract:
            a.rsubi(scalar);
            break;
        case ReverseDivide:
            a.rdivi(scalar);
            break;
        case ScalarMin:
            Transforms.min(a, scalar, false);
            break;
        case ScalarMax:
            Transforms.max(a, scalar, false);
            break;
        default:
            throw new UnsupportedOperationException("Unknown or not supported op: " + mathOp);
    }

    //To avoid threading issues...
    Nd4j.getExecutioner().commit();

    return new NDArrayWritable(a);
}
Example 10
Source File: NDArrayColumnsMathOpTransform.java From deeplearning4j with Apache License 2.0
@Override
protected Writable doOp(Writable... input) {
    INDArray out = ((NDArrayWritable) input[0]).get().dup();

    switch (mathOp) {
        case Add:
            for (int i = 1; i < input.length; i++) {
                out.addi(((NDArrayWritable) input[i]).get());
            }
            break;
        case Subtract:
            out.subi(((NDArrayWritable) input[1]).get());
            break;
        case Multiply:
            for (int i = 1; i < input.length; i++) {
                out.muli(((NDArrayWritable) input[i]).get());
            }
            break;
        case Divide:
            out.divi(((NDArrayWritable) input[1]).get());
            break;
        case ReverseSubtract:
            out.rsubi(((NDArrayWritable) input[1]).get());
            break;
        case ReverseDivide:
            out.rdivi(((NDArrayWritable) input[1]).get());
            break;
        case Modulus:
        case ScalarMin:
        case ScalarMax:
            throw new IllegalArgumentException(
                            "Invalid MathOp: cannot use " + mathOp + " with NDArrayColumnsMathOpTransform");
        default:
            throw new RuntimeException("Unknown MathOp: " + mathOp);
    }

    //To avoid threading issues...
    Nd4j.getExecutioner().commit();

    return new NDArrayWritable(out);
}
Example 11
Source File: NDArrayScalarOpTransform.java From deeplearning4j with Apache License 2.0
@Override
public NDArrayWritable map(Writable w) {
    if (!(w instanceof NDArrayWritable)) {
        throw new IllegalArgumentException("Input writable is not an NDArrayWritable: is " + w.getClass());
    }

    //Make a copy - can't always assume that the original INDArray won't be used again in the future
    NDArrayWritable n = ((NDArrayWritable) w);
    INDArray a = n.get().dup();
    switch (mathOp) {
        case Add:
            a.addi(scalar);
            break;
        case Subtract:
            a.subi(scalar);
            break;
        case Multiply:
            a.muli(scalar);
            break;
        case Divide:
            a.divi(scalar);
            break;
        case Modulus:
            a.fmodi(scalar);
            break;
        case ReverseSubtract:
            a.rsubi(scalar);
            break;
        case ReverseDivide:
            a.rdivi(scalar);
            break;
        case ScalarMin:
            Transforms.min(a, scalar, false);
            break;
        case ScalarMax:
            Transforms.max(a, scalar, false);
            break;
        default:
            throw new UnsupportedOperationException("Unknown or not supported op: " + mathOp);
    }

    //To avoid threading issues...
    Nd4j.getExecutioner().commit();

    return new NDArrayWritable(a);
}
Example 12
Source File: GlobalPoolingLayer.java From deeplearning4j with Apache License 2.0
private INDArray epsilonHelperFullArray(INDArray inputArray, INDArray epsilon, int[] poolDim) {

    //Broadcast: occurs on the remaining dimensions, after the pool dimensions have been removed.
    //TODO find a more efficient way to do this
    int[] broadcastDims = new int[inputArray.rank() - poolDim.length];
    int count = 0;
    for (int i = 0; i < inputArray.rank(); i++) {
        if (ArrayUtils.contains(poolDim, i))
            continue;
        broadcastDims[count++] = i;
    }

    switch (poolingType) {
        case MAX:
            INDArray isMax = Nd4j.exec(new IsMax(inputArray, inputArray.ulike(), poolDim))[0];
            return Nd4j.getExecutioner().exec(new BroadcastMulOp(isMax, epsilon, isMax, broadcastDims));
        case AVG:
            //if out = avg(in,dims) then dL/dIn = 1/N * dL/dOut
            int n = 1;
            for (int d : poolDim) {
                n *= inputArray.size(d);
            }
            INDArray ret = inputArray.ulike();
            Nd4j.getExecutioner().exec(new BroadcastCopyOp(ret, epsilon, ret, broadcastDims));
            ret.divi(n);
            return ret;
        case SUM:
            INDArray retSum = inputArray.ulike();
            Nd4j.getExecutioner().exec(new BroadcastCopyOp(retSum, epsilon, retSum, broadcastDims));
            return retSum;
        case PNORM:
            int pnorm = layerConf().getPnorm();

            //First: do forward pass to get pNorm array
            INDArray abs = Transforms.abs(inputArray, true);
            Transforms.pow(abs, pnorm, false);
            INDArray pNorm = Transforms.pow(abs.sum(poolDim), 1.0 / pnorm);

            //dL/dIn = dL/dOut * dOut/dIn
            //dOut/dIn = in .* |in|^(p-2) / ||in||_p^(p-1), where ||in||_p is the output p-norm
            INDArray numerator;
            if (pnorm == 2) {
                numerator = inputArray.dup();
            } else {
                INDArray absp2 = Transforms.pow(Transforms.abs(inputArray, true), pnorm - 2, false);
                numerator = inputArray.mul(absp2);
            }

            INDArray denom = Transforms.pow(pNorm, pnorm - 1, false);
            denom.rdivi(epsilon);
            Nd4j.getExecutioner().execAndReturn(new BroadcastMulOp(numerator, denom, numerator, broadcastDims));

            return numerator;
        default:
            throw new RuntimeException("Unknown or not supported pooling type: " + poolingType + " " + layerId());
    }
}
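One detail worth noting in the PNORM branch: denom.rdivi(epsilon) overwrites denom with epsilon / denom, so the epsilon-over-norm term is computed without allocating a fresh array. The two MaskedReductionUtil examples below use the same denom.rdivi(epsilon2d) idiom in their PNORM branches.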
Example 13
Source File: MaskedReductionUtil.java From deeplearning4j with Apache License 2.0
public static INDArray maskedPoolingEpsilonTimeSeries(PoolingType poolingType, INDArray input, INDArray mask,
                INDArray epsilon2d, int pnorm) {

    if (input.rank() != 3) {
        throw new IllegalArgumentException("Expect rank 3 input activation array: got " + input.rank());
    }
    if (mask.rank() != 2) {
        throw new IllegalArgumentException("Expect rank 2 array for mask: got " + mask.rank());
    }
    if (epsilon2d.rank() != 2) {
        throw new IllegalArgumentException("Expected rank 2 array for errors: got " + epsilon2d.rank());
    }

    //Mask: [minibatch, tsLength]
    //Epsilon: [minibatch, vectorSize]

    mask = mask.castTo(input.dataType());

    switch (poolingType) {
        case MAX:
            INDArray negInfMask = mask.rsub(1.0);
            BooleanIndexing.replaceWhere(negInfMask, Double.NEGATIVE_INFINITY, Conditions.equals(1.0));

            INDArray withInf = Nd4j.createUninitialized(input.dataType(), input.shape());
            Nd4j.getExecutioner().exec(new BroadcastAddOp(input, negInfMask, withInf, 0, 2));
            //At this point: all the masked out steps have value -inf, hence can't be the output of the MAX op

            INDArray isMax = Nd4j.exec(new IsMax(withInf, withInf.ulike(), 2))[0];

            return Nd4j.getExecutioner().exec(new BroadcastMulOp(isMax, epsilon2d, isMax, 0, 1));
        case AVG:
        case SUM:
            //if out = sum(in,dims) then dL/dIn = dL/dOut -> duplicate to each step and mask
            //if out = avg(in,dims) then dL/dIn = 1/N * dL/dOut
            //With masking: N differs for different time series

            INDArray out = Nd4j.createUninitialized(input.dataType(), input.shape(), 'f');

            //Broadcast copy op, then divide and mask to 0 as appropriate
            Nd4j.getExecutioner().exec(new BroadcastCopyOp(out, epsilon2d, out, 0, 1));
            Nd4j.getExecutioner().exec(new BroadcastMulOp(out, mask, out, 0, 2));

            if (poolingType == PoolingType.SUM) {
                return out;
            }

            INDArray nEachTimeSeries = mask.sum(1); //[minibatchSize,tsLength] -> [minibatchSize,1]
            Nd4j.getExecutioner().exec(new BroadcastDivOp(out, nEachTimeSeries, out, 0));

            return out;
        case PNORM:
            //Similar to average and sum pooling: there's no N term here, so we can just set the masked values to 0
            INDArray masked2 = Nd4j.createUninitialized(input.dataType(), input.shape());
            Nd4j.getExecutioner().exec(new BroadcastMulOp(input, mask, masked2, 0, 2));

            INDArray abs = Transforms.abs(masked2, true);
            Transforms.pow(abs, pnorm, false);
            INDArray pNorm = Transforms.pow(abs.sum(2), 1.0 / pnorm);

            INDArray numerator;
            if (pnorm == 2) {
                numerator = input.dup();
            } else {
                INDArray absp2 = Transforms.pow(Transforms.abs(input, true), pnorm - 2, false);
                numerator = input.mul(absp2);
            }

            INDArray denom = Transforms.pow(pNorm, pnorm - 1, false);
            denom.rdivi(epsilon2d);
            Nd4j.getExecutioner().execAndReturn(new BroadcastMulOp(numerator, denom, numerator, 0, 1));
            Nd4j.getExecutioner().exec(new BroadcastMulOp(numerator, mask, numerator, 0, 2)); //Apply mask

            return numerator;
        default:
            throw new UnsupportedOperationException("Unknown or not supported pooling type: " + poolingType);
    }
}
Example 14
Source File: MaskedReductionUtil.java From deeplearning4j with Apache License 2.0
public static INDArray maskedPoolingEpsilonCnn(PoolingType poolingType, INDArray input, INDArray mask,
                INDArray epsilon2d, int pnorm, DataType dataType) {

    // [minibatch, channels, h=1, w=X] or [minibatch, channels, h=X, w=1] data
    // with a mask array of shape [minibatch, X]

    //If masking along height: broadcast dimensions are [0,2]
    //If masking along width: broadcast dimensions are [0,3]

    mask = mask.castTo(dataType);   //No-op if correct type

    //General case: must be equal or 1 on each dimension
    int[] dimensions = new int[4];
    int count = 0;
    for (int i = 0; i < 4; i++) {
        if (input.size(i) == mask.size(i)) {
            dimensions[count++] = i;
        }
    }
    if (count < 4) {
        dimensions = Arrays.copyOfRange(dimensions, 0, count);
    }

    switch (poolingType) {
        case MAX:
            //TODO This is ugly - replace it with something better... Need something like a Broadcast CAS op
            INDArray negInfMask;
            if (mask.dataType() == DataType.BOOL) {
                negInfMask = Transforms.not(mask).castTo(dataType);
            } else {
                negInfMask = mask.rsub(1.0);
            }
            BooleanIndexing.replaceWhere(negInfMask, Double.NEGATIVE_INFINITY, Conditions.equals(1.0));

            INDArray withInf = Nd4j.createUninitialized(dataType, input.shape());
            Nd4j.getExecutioner().exec(new BroadcastAddOp(input, negInfMask, withInf, dimensions));
            //At this point: all the masked out steps have value -inf, hence can't be the output of the MAX op

            INDArray isMax = Nd4j.exec(new IsMax(withInf, withInf.ulike(), 2, 3))[0];

            return Nd4j.getExecutioner().exec(new BroadcastMulOp(isMax, epsilon2d, isMax, 0, 1));
        case AVG:
        case SUM:
            //if out = sum(in,dims) then dL/dIn = dL/dOut -> duplicate to each step and mask
            //if out = avg(in,dims) then dL/dIn = 1/N * dL/dOut
            //With masking: N differs for different time series

            INDArray out = Nd4j.createUninitialized(dataType, input.shape(), 'f');

            //Broadcast copy op, then divide and mask to 0 as appropriate
            Nd4j.getExecutioner().exec(new BroadcastCopyOp(out, epsilon2d, out, 0, 1));
            Nd4j.getExecutioner().exec(new BroadcastMulOp(out, mask, out, dimensions));

            if (poolingType == PoolingType.SUM) {
                return out;
            }

            //Note that with CNNs, current design is restricted to [minibatch, channels, 1, W] or [minibatch, channels, H, 1]
            INDArray nEachTimeSeries = mask.sum(1, 2, 3); //[minibatchSize,tsLength] -> [minibatchSize,1]
            Nd4j.getExecutioner().exec(new BroadcastDivOp(out, nEachTimeSeries, out, 0));

            return out;
        case PNORM:
            //Similar to average and sum pooling: there's no N term here, so we can just set the masked values to 0
            INDArray masked2 = Nd4j.createUninitialized(dataType, input.shape());
            Nd4j.getExecutioner().exec(new BroadcastMulOp(input, mask, masked2, dimensions));

            INDArray abs = Transforms.abs(masked2, true);
            Transforms.pow(abs, pnorm, false);
            INDArray pNorm = Transforms.pow(abs.sum(2, 3), 1.0 / pnorm);

            INDArray numerator;
            if (pnorm == 2) {
                numerator = input.dup();
            } else {
                INDArray absp2 = Transforms.pow(Transforms.abs(input, true), pnorm - 2, false);
                numerator = input.mul(absp2);
            }

            INDArray denom = Transforms.pow(pNorm, pnorm - 1, false);
            denom.rdivi(epsilon2d);
            Nd4j.getExecutioner().execAndReturn(new BroadcastMulOp(numerator, denom, numerator, 0, 1));
            Nd4j.getExecutioner().exec(new BroadcastMulOp(numerator, mask, numerator, dimensions)); //Apply mask

            return numerator;
        default:
            throw new UnsupportedOperationException("Unknown or not supported pooling type: " + poolingType);
    }
}