Java Code Examples for org.nd4j.linalg.factory.Nd4j#valueArrayOf()
The following examples show how to use org.nd4j.linalg.factory.Nd4j#valueArrayOf().
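Before the examples, a minimal sketch of what valueArrayOf does: it allocates a new INDArray of the requested shape with every element set to the given value. The sketch below exercises only the overloads that appear in the examples on this page; the class name ValueArrayOfDemo and the printed values are illustrative assumptions, not nd4j code.

import org.nd4j.linalg.api.buffer.DataType;
import org.nd4j.linalg.api.ndarray.INDArray;
import org.nd4j.linalg.factory.Nd4j;

// Hypothetical demo class - not part of nd4j itself
public class ValueArrayOfDemo {
    public static void main(String[] args) {
        // Length-4 vector, every element set to 2.0
        INDArray vector = Nd4j.valueArrayOf(4, 2.0);

        // 3x5 matrix via the (rows, columns, value) overload
        INDArray matrix = Nd4j.valueArrayOf(3, 5, -1000.0);

        // Arbitrary shape via long[], with an explicit data type
        INDArray typed = Nd4j.valueArrayOf(new long[]{3, 5}, 0.2, DataType.DOUBLE);

        System.out.println(vector.length());   // 4
        System.out.println(matrix.rank());     // 2
        System.out.println(typed.dataType());  // DOUBLE
    }
}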
Example 1
Source File: SDVariable.java From nd4j with Apache License 2.0
/**
 * A getter for the ndarray allocated for this {@link SDVariable}.
 *
 * This getter will lazily initialize an array if one is not found,
 * based on the associated shape and {@link WeightInitScheme};
 * if neither is found, an {@link ND4JIllegalStateException} is thrown.
 *
 * If a {@link DifferentialFunction} is defined, note that
 * its getArr() method is called instead.
 *
 * @return the {@link INDArray} associated with this variable.
 */
public INDArray getArr() {
    if (sameDiff.arrayAlreadyExistsForVarName(getVarName()))
        return sameDiff.getArrForVarName(getVarName());

    //initialize value if it's actually a scalar constant (zero or 1 typically...)
    if (getScalarValue() != null && ArrayUtil.prod(getShape()) == 1) {
        INDArray arr = Nd4j.valueArrayOf(getShape(), getScalarValue().doubleValue());
        sameDiff.associateArrayWithVariable(arr, this);
    } else if (sameDiff.getShapeForVarName(getVarName()) == null) {
        return null;
    } else {
        INDArray newAlloc = getWeightInitScheme().create(sameDiff.getShapeForVarName(getVarName()));
        sameDiff.associateArrayWithVariable(newAlloc, this);
    }

    return sameDiff.getArrForVarName(getVarName());
}
Example 2
Source File: SameDiffTests.java From deeplearning4j with Apache License 2.0
@Test
public void testNonScalarOutput2() {
    SameDiff sd = SameDiff.create();
    SDVariable a = sd.reshape("a", sd.linspace("at", DataType.DOUBLE, 1, 15, 15), 3, 5);
    SDVariable b = sd.var("b", Nd4j.ones(DataType.DOUBLE, 3, 5));

    SDVariable out = a.mul(b).mean(1);
    out.markAsLoss();
    out.eval();
    //System.out.println(out.eval());

    INDArray actGrad = sd.grad("a").eval();

    INDArray expGrad = Nd4j.valueArrayOf(new long[]{3, 5}, 0.2, DataType.DOUBLE);
    assertEquals(expGrad, actGrad);

    String err = OpValidation.validate(new TestCase(sd).gradientCheck(true));
    assertNull(err);
}
Example 3
Source File: TestComputationGraphNetwork.java From deeplearning4j with Apache License 2.0
@Test
public void testGetSetParamUnderscores() {
    //Test get/set param with underscores in layer name
    ComputationGraphConfiguration conf = new NeuralNetConfiguration.Builder()
            .graphBuilder()
            .addInputs("in")
            .layer("layer_zero", new DenseLayer.Builder().nIn(10).nOut(10).build(), "in")
            .layer("layer_one", new OutputLayer.Builder().nIn(10).nOut(10).build(), "layer_zero")
            .setOutputs("layer_one")
            .build();

    ComputationGraph cg = new ComputationGraph(conf);
    cg.init();
    cg.params().assign(Nd4j.linspace(1, 220, 220).reshape(1, -1));

    INDArray p0w = cg.getParam("layer_zero_W");
    assertEquals(Nd4j.linspace(1, 100, 100).reshape('f', 10, 10), p0w);

    INDArray p1b = cg.getParam("layer_one_b");
    assertEquals(Nd4j.linspace(211, 220, 10).reshape(1, 10), p1b);

    INDArray newP1b = Nd4j.valueArrayOf(new long[]{1, 10}, -1.0);
    cg.setParam("layer_one_b", newP1b);
    assertEquals(newP1b, p1b);
}
Example 4
Source File: LossFunctionTest.java From deeplearning4j with Apache License 2.0
@Test
public void testClippingXENT() {
    ILossFunction l1 = new LossBinaryXENT(0);
    ILossFunction l2 = new LossBinaryXENT();

    INDArray labels = Nd4j.getExecutioner().exec(new BernoulliDistribution(Nd4j.create(3, 5), 0.5));
    INDArray preOut = Nd4j.valueArrayOf(3, 5, -1000.0);

    IActivation a = new ActivationSigmoid();

    double score1 = l1.computeScore(labels, preOut.dup(), a, null, false);
    assertTrue(Double.isNaN(score1));

    double score2 = l2.computeScore(labels, preOut.dup(), a, null, false);
    assertFalse(Double.isNaN(score2));

    INDArray grad1 = l1.computeGradient(labels, preOut.dup(), a, null);
    INDArray grad2 = l2.computeGradient(labels, preOut.dup(), a, null);

    MatchCondition c1 = new MatchCondition(grad1, Conditions.isNan());
    MatchCondition c2 = new MatchCondition(grad2, Conditions.isNan());
    int match1 = Nd4j.getExecutioner().exec(c1).getInt(0);
    int match2 = Nd4j.getExecutioner().exec(c2).getInt(0);

    assertTrue(match1 > 0);
    assertEquals(0, match2);
}
Example 5
Source File: OpExecutionerTestsC.java From deeplearning4j with Apache License 2.0
@Test
public void testScalarReverseSub() {
    INDArray input = Nd4j.valueArrayOf(4, 2.0);
    INDArray result = Nd4j.zeros(4);
    Nd4j.getExecutioner().exec(new ScalarReverseSubtraction(input, null, result, 1.0));
    INDArray assertion = Nd4j.valueArrayOf(4, -1.0);
    assertEquals(assertion, result);
}
Example 6
Source File: NDArrayTestsFortran.java From nd4j with Apache License 2.0
@Test
public void testAddScalar() {
    INDArray div = Nd4j.valueArrayOf(new long[] {1, 4}, 4);
    INDArray rdiv = div.add(1);
    INDArray answer = Nd4j.valueArrayOf(new long[] {1, 4}, 5);
    assertEquals(answer, rdiv);
}
Example 7
Source File: NDArrayTestsFortran.java From nd4j with Apache License 2.0
@Test
public void testAddMatrix() {
    INDArray five = Nd4j.ones(5);
    five.addi(five.dup());
    INDArray twos = Nd4j.valueArrayOf(5, 2);
    assertEquals(getFailureMessage(), twos, five);
}
Example 8
Source File: SpecialTests.java From deeplearning4j with Apache License 2.0
@Test
public void testMatchCondition() {
    INDArray x = Nd4j.valueArrayOf(new long[]{10, 10}, 2.0, DataType.DOUBLE);
    val op = new MatchCondition(x, Conditions.equals(2));
    INDArray z = Nd4j.getExecutioner().exec(op);
    int count = z.getInt(0);
    assertEquals(100, count);
}
Example 9
Source File: ReductionBpOpValidation.java From deeplearning4j with Apache License 2.0
@Test
public void testMeanBP() {
    //dL/dIn_i = dL/dOut * dOut/dIn_i
    //         = dL/dOut * d(1/N * sum_j in_j)/dIn_i
    //         = 1/N * dL/dOut
    //i.e., same as the SUM case, but divided by N
    //NOTE: N = num values in array
    //But for the "along dimension" case, it's the number of elements in that TAD

    //Full array reduction
    //reduce_mean_bp op: has 2 inputs (original pre-reduce input, and gradient at output (epsilon))
    for (boolean keepDims : new boolean[]{false, true}) {
        INDArray preReduceInput = Nd4j.linspace(1, 12, 12).reshape(3, 4);
        INDArray dLdOut;
        if (keepDims) {
            dLdOut = Nd4j.valueArrayOf(new long[]{1, 1}, 0.5);
        } else {
            dLdOut = Nd4j.scalar(0.5);
        }
        INDArray dLdInExpected = Nd4j.valueArrayOf(preReduceInput.shape(), 0.5 / preReduceInput.length());
        INDArray dLdIn = Nd4j.createUninitialized(3, 4);

        String err = OpValidation.validate(new OpTestCase(new MeanBp(preReduceInput, dLdOut, dLdIn, keepDims))
                .expectedOutput(0, dLdInExpected));
        assertNull(err);
    }
}
Example 10
Source File: OpExecutionerTests.java From deeplearning4j with Apache License 2.0
@Test
public void testExecutioner() {
    OpExecutioner opExecutioner = Nd4j.getExecutioner();
    INDArray x = Nd4j.ones(5);
    INDArray xDup = x.dup();
    INDArray solution = Nd4j.valueArrayOf(5, 2.0);
    opExecutioner.exec(new AddOp(new INDArray[]{x, xDup}, new INDArray[]{x}));
    assertEquals(getFailureMessage(), solution, x);

    Sum acc = new Sum(x.dup());
    opExecutioner.exec(acc);
    assertEquals(getFailureMessage(), 10.0, acc.getFinalResult().doubleValue(), 1e-1);

    Prod prod = new Prod(x.dup());
    opExecutioner.exec(prod);
    assertEquals(getFailureMessage(), 32.0, prod.getFinalResult().doubleValue(), 1e-1);
}
Example 11
Source File: OpExecutionerTests.java From nd4j with Apache License 2.0
@Test
public void testAdd() {
    OpExecutioner opExecutioner = Nd4j.getExecutioner();
    INDArray x = Nd4j.ones(5);
    INDArray xDup = x.dup();
    INDArray solution = Nd4j.valueArrayOf(5, 2.0);
    opExecutioner.exec(new AddOp(new INDArray[]{x, xDup}, new INDArray[]{x}));
    assertEquals(getFailureMessage(), solution, x);
}
Example 12
Source File: OpExecutionerTests.java From deeplearning4j with Apache License 2.0
@Test
public void testAdd() {
    OpExecutioner opExecutioner = Nd4j.getExecutioner();
    INDArray x = Nd4j.ones(5);
    INDArray xDup = x.dup();
    INDArray solution = Nd4j.valueArrayOf(5, 2.0);
    opExecutioner.exec(new AddOp(new INDArray[]{x, xDup}, new INDArray[]{x}));
    assertEquals(getFailureMessage(), solution, x);
}
Example 13
Source File: OpExecutionerTestsC.java From nd4j with Apache License 2.0
@Test
public void testMul() {
    OpExecutioner opExecutioner = Nd4j.getExecutioner();
    INDArray x = Nd4j.ones(5);
    INDArray xDup = x.dup();
    INDArray solution = Nd4j.valueArrayOf(5, 1.0);
    opExecutioner.exec(new OldMulOp(x, xDup, x));
    assertEquals(solution, x);
}
Example 14
Source File: IndexingTests.java From deeplearning4j with Apache License 2.0
@Test
public void testPutSimple() {
    INDArray x = Nd4j.linspace(1, 16, 16, DataType.DOUBLE).reshape('c', 2, 2, 2, 2);
    INDArray indexes = Nd4j.create(new double[][]{
            {0}, {1}
    });
    x.put(indexes, Nd4j.create(new double[] {5, 5}));
    INDArray vals = Nd4j.valueArrayOf(new long[] {2, 2, 2, 2}, 5, DataType.DOUBLE);
    assertEquals(vals, x);
}
Example 15
Source File: ReductionBpOpValidation.java From deeplearning4j with Apache License 2.0
@Test
public void testProdBP() {
    //Full array product reduction:
    //dL/dIn_i = dL/dOut * dOut/dIn_i
    //         = dL/dOut * d(prod(in))/dIn_i
    //         = dL/dOut * (prod(in) / in_i)
    for (boolean keepDims : new boolean[]{false, true}) {
        INDArray preReduceInput = Nd4j.linspace(1, 12, 12).reshape(3, 4);
        INDArray dLdOut;
        if (keepDims) {
            dLdOut = Nd4j.valueArrayOf(new long[]{1, 1}, 0.5);
        } else {
            dLdOut = Nd4j.scalar(0.5);
        }
        double prod = preReduceInput.prodNumber().doubleValue();
        INDArray dLdInExpected = Nd4j.valueArrayOf(preReduceInput.shape(), prod)
                .divi(preReduceInput).muli(0.5);
        INDArray dLdIn = Nd4j.createUninitialized(3, 4);

        String err = OpValidation.validate(new OpTestCase(new ProdBp(preReduceInput, dLdOut, dLdIn, keepDims))
                .expectedOutput(0, dLdInExpected));
        assertNull(err);
    }
}
Example 16
Source File: OpExecutionerTests.java From deeplearning4j with Apache License 2.0
@Test
public void testMul() {
    OpExecutioner opExecutioner = Nd4j.getExecutioner();
    INDArray x = Nd4j.ones(5);
    INDArray xDup = x.dup();
    INDArray solution = Nd4j.valueArrayOf(5, 1.0);
    opExecutioner.exec(new MulOp(x, xDup, x));
    assertEquals(solution, x);
}
Example 17
Source File: OpExecutionerTestsC.java From deeplearning4j with Apache License 2.0
@Test
public void testMul() {
    OpExecutioner opExecutioner = Nd4j.getExecutioner();
    INDArray x = Nd4j.ones(5);
    INDArray xDup = x.dup();
    INDArray solution = Nd4j.valueArrayOf(5, 1.0);
    opExecutioner.exec(new MulOp(x, xDup, x));
    assertEquals(solution, x);
}
Example 18
Source File: OpExecutionerTestsC.java From deeplearning4j with Apache License 2.0
@Test
public void testExecutioner() {
    OpExecutioner opExecutioner = Nd4j.getExecutioner();
    INDArray x = Nd4j.ones(5);
    INDArray xDup = x.dup();
    INDArray solution = Nd4j.valueArrayOf(5, 2.0);
    opExecutioner.exec(new AddOp(new INDArray[]{x, xDup}, new INDArray[]{x}));
    assertEquals(getFailureMessage(), solution, x);

    Sum acc = new Sum(x.dup());
    opExecutioner.exec(acc);
    assertEquals(getFailureMessage(), 10.0, acc.getFinalResult().doubleValue(), 1e-1);

    Prod prod = new Prod(x.dup());
    opExecutioner.exec(prod);
    assertEquals(getFailureMessage(), 32.0, prod.getFinalResult().doubleValue(), 1e-1);
}
Example 19
Source File: NDArrayTestsFortran.java From nd4j with Apache License 2.0
@Test
public void testRDivi() {
    INDArray n2 = Nd4j.valueArrayOf(new long[] {1, 2}, 4);
    INDArray n2Assertion = Nd4j.valueArrayOf(new long[] {1, 2}, 0.5);
    INDArray nRsubi = n2.rdivi(2);
    assertEquals(n2Assertion, nRsubi);
}
Example 20
Source File: CudnnConvolutionHelper.java From deeplearning4j with Apache License 2.0
/**
 * @param poolingType Used when preparing data for subsampling layers ONLY. Null for convolution layers
 * @return
 */
public static CudnnForwardArgs getCudnnForwardArgs(INDArray input, int[] kernel, int[] strides, int[] padding,
                                                   int[] dilation, ConvolutionMode convolutionMode,
                                                   PoolingType poolingType, CNN2DFormat format) {
    INDArray origInput = input;

    //Check if we need to dup the input: views, non-contiguous, etc. CuDNN also seems to have issues if strides
    // are non-default for C order - even if they *should* be OK otherwise
    if (input.isView() || !Shape.hasDefaultStridesForShape(input)) {
        input = input.dup('c');
    }

    boolean nchw = format == CNN2DFormat.NCHW;
    int hIdx = nchw ? 2 : 1;
    int wIdx = nchw ? 3 : 2;

    val inH = input.size(hIdx);
    val inW = input.size(wIdx);

    boolean manualPadBottom = false;
    boolean manualPadRight = false;

    int[] outSize;
    if (convolutionMode == ConvolutionMode.Same) {
        outSize = ConvolutionUtils.getOutputSize(input, kernel, strides, null, convolutionMode, dilation, format); //Also performs validation
        padding = ConvolutionUtils.getSameModeTopLeftPadding(outSize, new int[] {(int) inH, (int) inW}, kernel, strides, dilation);
        int[] padBottomRight = ConvolutionUtils.getSameModeBottomRightPadding(outSize, new int[] {(int) inH, (int) inW}, kernel, strides, dilation);
        if (!Arrays.equals(padding, padBottomRight)) {
            /*
            CuDNN - even as of 7.1 (CUDA 9.1) still doesn't have support for proper SAME mode padding (i.e., asymmetric
            padding) - padding can *only* be specified as the same amount for both the top/bottom, and for left/right.
            In SAME mode padding, sometimes these are the same - but often they are not.
            Note that when they differ, the bottom or right padding will be exactly 1 more than the top or left padding.
            As per TF, we'll manually pad here:
            https://github.com/tensorflow/tensorflow/blob/master/tensorflow/core/kernels/conv_ops.cc#L571-L607
             */
            manualPadBottom = (padding[0] != padBottomRight[0]);
            manualPadRight = (padding[1] != padBottomRight[1]);

            long[] newShape;
            if (nchw) {
                //NCHW format
                newShape = new long[]{input.size(0), input.size(1),
                        input.size(2) + (manualPadBottom ? 1 : 0),
                        input.size(3) + (manualPadRight ? 1 : 0)};
            } else {
                newShape = new long[]{input.size(0),
                        input.size(1) + (manualPadBottom ? 1 : 0),
                        input.size(2) + (manualPadRight ? 1 : 0),
                        input.size(3)};
            }

            INDArray newInput;
            if (poolingType == null || poolingType != PoolingType.MAX) {
                newInput = Nd4j.create(input.dataType(), newShape);
            } else {
                //For max pooling, we don't want to include the padding in the maximum values. But, CuDNN doesn't know
                // that these values are padding and hence should be excluded. Instead: We'll use -infinity so that,
                // if the 'real' (non-padding) values are all < 0, we take the real value, not the padding value
                newInput = Nd4j.valueArrayOf(newShape, Double.NEGATIVE_INFINITY, input.dataType());
            }

            if (nchw) {
                newInput.put(new INDArrayIndex[]{all(), all(), interval(0, input.size(2)), interval(0, input.size(3))}, input);
            } else {
                newInput.put(new INDArrayIndex[]{all(), interval(0, input.size(1)), interval(0, input.size(2)), all()}, input);
            }

            input = newInput;
            //Now: we've manually applied the "extra" bottom/right padding only - if required. Consequently, we
            // now have the same amount of padding required for top/bottom, and left/right - which we'll let
            // CuDNN handle
        }
    } else {
        outSize = ConvolutionUtils.getOutputSize(input, kernel, strides, padding, convolutionMode, dilation, format); //Also performs validation
    }

    return new CudnnForwardArgs(manualPadBottom, manualPadRight, input, origInput, padding, outSize);
}