Java Code Examples for org.nd4j.autodiff.samediff.SDVariable#eval()
The following examples show how to use org.nd4j.autodiff.samediff.SDVariable#eval(), which executes the graph operations needed to compute the variable's value and returns the result as an INDArray. The examples are drawn from open source projects; the source file and license are noted above each example.
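Before the project examples, here is a minimal, self-contained sketch of the pattern they all follow: build a SameDiff graph, attach concrete arrays, and call eval() on the variable whose value you want. The class name, variable names, and values here are illustrative, not taken from any of the projects below.

import org.nd4j.autodiff.samediff.SDVariable;
import org.nd4j.autodiff.samediff.SameDiff;
import org.nd4j.linalg.api.ndarray.INDArray;
import org.nd4j.linalg.factory.Nd4j;

public class EvalSketch {
    public static void main(String[] args) {
        SameDiff sd = SameDiff.create();
        // Variable backed by a concrete array: [1, 2, 3, 4]
        SDVariable x = sd.var("x", Nd4j.linspace(1, 4, 4));
        // Build a small graph: y = x * x
        SDVariable y = x.mul("y", x);
        // eval() executes the ops needed to compute y and returns an INDArray
        INDArray result = y.eval();
        System.out.println(result); // [1.0, 4.0, 9.0, 16.0]
    }
}

Calling eval() on a variable whose placeholders have no arrays attached will fail, which is why several of the examples below call associateArrayWithVariable(...) before eval().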
Example 1
Source File: NDLossTest.java From deeplearning4j with Apache License 2.0
@Test
public void testSparseSoftmaxCrossEntropy() {
    SameDiff sd = SameDiff.create();
    int nOut = 4;
    int minibatch = 10;
    SDVariable predictions = sd.var("in", DataType.DOUBLE, minibatch, nOut);
    SDVariable labels = sd.var("labels", DataType.INT32, -1);

    INDArray predictionsArr = Nd4j.randn(DataType.DOUBLE, minibatch, nOut);
    INDArray labelsArr = Nd4j.create(DataType.INT32, minibatch);
    for (int i = 0; i < minibatch; i++) {
        labelsArr.putScalar(i, i % nOut);
    }

    SDVariable loss = sd.loss().sparseSoftmaxCrossEntropy("loss", predictions, labels);
    sd.associateArrayWithVariable(predictionsArr, predictions);
    sd.associateArrayWithVariable(labelsArr, labels);

    INDArray y_exp = loss.eval();
    INDArray y = Nd4j.loss().sparseSoftmaxCrossEntropy(predictionsArr, labelsArr);
    assertEquals(y_exp, y);
}
Example 2
Source File: ShapeOpValidation.java From deeplearning4j with Apache License 2.0
@Test
public void testPermute3() {
    INDArray in = Nd4j.linspace(DataType.FLOAT, 1, 6, 1).reshape(3, 2);
    INDArray permute = Nd4j.createFromArray(1, 0);

    SameDiff sd = SameDiff.create();
    SDVariable v = sd.var(in);
    SDVariable v2 = sd.constant(permute);
    SDVariable out = v.permute(v2);

    INDArray exp = in.transpose();
    INDArray outArr = out.eval();
    assertEquals(exp, outArr);
}
Example 3
Source File: NDLossTest.java From deeplearning4j with Apache License 2.0
@Test
public void testL2Loss() {
    SameDiff sd = SameDiff.create();
    int nOut = 4;
    int minibatch = 10;
    SDVariable predictions = sd.var("in", DataType.DOUBLE, minibatch, nOut);
    INDArray predictionsArr = Nd4j.randn(DataType.DOUBLE, minibatch, nOut);

    SDVariable loss = sd.loss().l2Loss("loss", predictions);
    sd.associateArrayWithVariable(predictionsArr, predictions);

    INDArray y_exp = loss.eval();
    INDArray y = Nd4j.loss().l2Loss(predictionsArr);
    assertEquals(y_exp, y);
}
Example 4
Source File: NDLossTest.java From deeplearning4j with Apache License 2.0
@Test
public void testWeightedCrossEntropyWithLogits() {
    // This one from SamediffTests.java
    SameDiff sameDiff = SameDiff.create();
    INDArray targets = Nd4j.create(new long[]{1, 5});
    INDArray inputs = Nd4j.create(new long[]{1, 5});
    INDArray weights = Nd4j.create(new long[]{1, 5});

    SDVariable sdInputs = sameDiff.var("inputs", inputs);
    SDVariable sdWeights = sameDiff.var("weights", weights);
    SDVariable sdTargets = sameDiff.var("targets", targets);

    SDVariable res = sameDiff.loss().weightedCrossEntropyWithLogits(sdTargets, sdInputs, sdWeights);
    INDArray resultArray = res.eval();
    assertArrayEquals(new long[]{1, 5}, resultArray.shape());

    // Make sure the INDArray interface produces the same result
    INDArray y = Nd4j.loss().weightedCrossEntropyWithLogits(targets, inputs, weights);
    assertEquals(resultArray, y);
}
Example 5
Source File: MiscOpValidation.java From deeplearning4j with Apache License 2.0
@Test
public void testDiagPart() {
    INDArray i = Nd4j.create(5, 5);
    SameDiff sd = SameDiff.create();
    SDVariable var = sd.var("in", i);
    SDVariable diag = sd.math().diagPart(var);

    INDArray out = diag.eval();
    assertEquals(1, out.rank());
}
Example 6
Source File: NDLossTest.java From deeplearning4j with Apache License 2.0
@Test
public void testHuberLoss() {
    SameDiff sd = SameDiff.create();
    int nOut = 4;
    int minibatch = 10;
    SDVariable predictions = sd.var("in", DataType.DOUBLE, minibatch, nOut);
    SDVariable labels = sd.var("labels", DataType.DOUBLE, -1, nOut);

    INDArray wArr = Nd4j.create(new double[][]{
            {0, 0, 0, 0}, {0, 0, 1, 1}, {1, 1, 0, 0}, {1, 1, 1, 1}, {1, 1, 1, 1},
            {2, 2, 2, 2}, {2, 2, 2, 2}, {2, 2, 2, 2}, {2, 2, 2, 2}, {2, 2, 2, 2}});
    SDVariable w = sd.var("weights", wArr);
    LossReduce reduction = LossReduce.MEAN_BY_NONZERO_WEIGHT_COUNT;

    INDArray predictionsArr = Nd4j.randn(DataType.DOUBLE, minibatch, nOut);
    INDArray labelsArr = Nd4j.randn(DataType.DOUBLE, minibatch, nOut);

    SDVariable loss = sd.loss().huberLoss("loss", labels, predictions, w, reduction, 0.02);
    SDVariable loss2 = sd.loss().huberLoss("loss2", labels, predictions, null, reduction, 0.02);
    sd.associateArrayWithVariable(predictionsArr, predictions);
    sd.associateArrayWithVariable(labelsArr, labels);

    INDArray y_exp = loss.eval();
    INDArray y_exp2 = loss2.eval();

    INDArray y = Nd4j.loss().huberLoss(labelsArr, predictionsArr, wArr, reduction, 0.02);
    INDArray y2 = Nd4j.loss().huberLoss(labelsArr, predictionsArr, null, reduction, 0.02);
    assertEquals(y_exp, y);
    assertEquals(y_exp2, y2);
}
Example 7
Source File: NDLossTest.java From deeplearning4j with Apache License 2.0
@Test
public void testHingeLoss() {
    SameDiff sd = SameDiff.create();
    int nOut = 4;
    int minibatch = 10;
    SDVariable predictions = sd.var("in", DataType.DOUBLE, minibatch, nOut);
    SDVariable labels = sd.var("labels", DataType.DOUBLE, -1, nOut);

    INDArray wArr = Nd4j.create(new double[][]{
            {0, 0, 0, 0}, {0, 0, 1, 1}, {1, 1, 0, 0}, {1, 1, 1, 1}, {1, 1, 1, 1},
            {2, 2, 2, 2}, {2, 2, 2, 2}, {2, 2, 2, 2}, {2, 2, 2, 2}, {2, 2, 2, 2}});
    SDVariable w = sd.var("weights", wArr);
    LossReduce reduction = LossReduce.MEAN_BY_NONZERO_WEIGHT_COUNT;

    INDArray predictionsArr = Nd4j.randn(DataType.DOUBLE, minibatch, nOut);
    INDArray labelsArr = Nd4j.randn(DataType.DOUBLE, minibatch, nOut);

    SDVariable loss = sd.loss().hingeLoss("loss", labels, predictions, w, reduction);
    SDVariable loss2 = sd.loss().hingeLoss("loss2", labels, predictions, null, reduction);
    sd.associateArrayWithVariable(predictionsArr, predictions);
    sd.associateArrayWithVariable(labelsArr, labels);

    INDArray y_exp = loss.eval();
    INDArray y_exp2 = loss2.eval();

    INDArray y = Nd4j.loss().hingeLoss(labelsArr, predictionsArr, wArr, reduction);
    INDArray y2 = Nd4j.loss().hingeLoss(labelsArr, predictionsArr, null, reduction);
    assertEquals(y_exp, y);
    assertEquals(y_exp2, y2);
}
Example 8
Source File: NDLossTest.java From deeplearning4j with Apache License 2.0
@Test
public void testSoftmaxCrossEntropy() {
    SameDiff sd = SameDiff.create();
    int nOut = 4;
    int minibatch = 10;
    SDVariable predictions = sd.var("in", DataType.DOUBLE, minibatch, nOut);
    SDVariable labels = sd.var("labels", DataType.DOUBLE, -1, nOut);

    INDArray wArr = Nd4j.scalar(1.0); //TODO: This test fails with a complex weights array.
    SDVariable w = null;
    LossReduce reduction = LossReduce.MEAN_BY_NONZERO_WEIGHT_COUNT;

    INDArray predictionsArr = Nd4j.randn(DataType.DOUBLE, minibatch, nOut);
    INDArray labelsArr = Nd4j.randn(DataType.DOUBLE, minibatch, nOut);
    labelsArr.assign(0);
    for (int i = 0; i < labelsArr.size(0); i++) {
        labelsArr.putScalar(i, i % labelsArr.size(1), 1.0);
    }

    double labelSmoothing = 0.0;
    SDVariable loss = sd.loss().softmaxCrossEntropy("loss", labels, predictions, null, reduction, labelSmoothing);
    SDVariable loss2 = sd.loss().softmaxCrossEntropy("loss2", labels, predictions, null, reduction, labelSmoothing);
    sd.associateArrayWithVariable(predictionsArr, predictions);
    sd.associateArrayWithVariable(labelsArr, labels);

    INDArray y_exp = loss.eval();
    INDArray y_exp2 = loss2.eval();

    INDArray y = Nd4j.loss().softmaxCrossEntropy(labelsArr, predictionsArr, wArr, reduction, labelSmoothing);
    INDArray y2 = Nd4j.loss().softmaxCrossEntropy(labelsArr, predictionsArr, null, reduction, labelSmoothing);
    assertEquals(y_exp, y);
    assertEquals(y_exp2, y2);
}
Example 9
Source File: NDLossTest.java From deeplearning4j with Apache License 2.0
@Test
public void testSigmoidCrossEntropy() {
    SameDiff sd = SameDiff.create();
    int nOut = 4;
    int minibatch = 10;
    SDVariable predictions = sd.var("in", DataType.DOUBLE, minibatch, nOut);
    SDVariable labels = sd.var("labels", DataType.DOUBLE, -1, nOut);

    INDArray wArr = Nd4j.create(new double[][]{
            {0, 0, 0, 0}, {0, 0, 1, 1}, {1, 1, 0, 0}, {1, 1, 1, 1}, {1, 1, 1, 1},
            {2, 2, 2, 2}, {2, 2, 2, 2}, {2, 2, 2, 2}, {2, 2, 2, 2}, {2, 2, 2, 2}});
    SDVariable w = sd.var("weights", wArr);
    LossReduce reduction = LossReduce.MEAN_BY_NONZERO_WEIGHT_COUNT;

    INDArray predictionsArr = Nd4j.randn(DataType.DOUBLE, minibatch, nOut);
    INDArray labelsArr = Nd4j.randn(DataType.DOUBLE, minibatch, nOut);

    double labelSmoothing = 0.01;
    SDVariable loss = sd.loss().sigmoidCrossEntropy("loss", labels, predictions, w, reduction, labelSmoothing);
    SDVariable loss2 = sd.loss().sigmoidCrossEntropy("loss2", labels, predictions, null, reduction, labelSmoothing);
    sd.associateArrayWithVariable(predictionsArr, predictions);
    sd.associateArrayWithVariable(labelsArr, labels);

    INDArray y_exp = loss.eval();
    INDArray y_exp2 = loss2.eval();

    INDArray y = Nd4j.loss().sigmoidCrossEntropy(labelsArr, predictionsArr, wArr, reduction, labelSmoothing);
    INDArray y2 = Nd4j.loss().sigmoidCrossEntropy(labelsArr, predictionsArr, null, reduction, labelSmoothing);
    assertEquals(y_exp, y);
    assertEquals(y_exp2, y2);
}
Example 10
Source File: TransformOpValidation.java From deeplearning4j with Apache License 2.0
@Test
public void testLogSumExp() {
    Nd4j.getRandom().setSeed(12345);
    INDArray inputArr = Nd4j.rand(DataType.FLOAT, 1, 4);
    SameDiff sd = SameDiff.create();
    SDVariable in = sd.var(inputArr);
    SDVariable lse = sd.math().logSumExp(in);
    INDArray out = lse.eval();

    // Expected value: logSumExp(x) == log(sum(exp(x)))
    INDArray exp = Transforms.exp(inputArr, true);
    INDArray sum = exp.sum();
    INDArray log = Transforms.log(sum);
    assertEquals(log, out);
}
Example 11
Source File: ShapeOpValidation.java From deeplearning4j with Apache License 2.0
@Test
public void testStridedSliceNewAxisMask() {
    INDArray inArr = Nd4j.linspace(1, 60, 60).reshape('c', 3, 4, 5);
    SameDiff sd = SameDiff.create();
    SDVariable in = sd.var("in", inArr);
    // Mask args after strides: beginMask, endMask, ellipsisMask, newAxisMask, shrinkAxisMask.
    // newAxisMask = 1 inserts a size-1 axis at dimension 0; the -999 begin/end/stride
    // values for that dimension are ignored.
    SDVariable slice = sd.stridedSlice(in,
            new long[]{-999, 0, 0, 0},
            new long[]{-999, 3, 4, 5},
            new long[]{-999, 1, 1, 1},
            0, 0, 0, 1, 0);

    INDArray out = slice.eval();
    assertArrayEquals(new long[]{1, 3, 4, 5}, out.shape());
    assertEquals(inArr, out.get(point(0), all(), all(), all()));
}
Example 12
Source File: ShapeOpValidation.java From deeplearning4j with Apache License 2.0
@Test
public void testReshapeGradient() {
    // https://github.com/deeplearning4j/deeplearning4j/issues/6873
    int[] origShape = new int[]{3, 4, 5};
    List<String> failed = new ArrayList<>();

    for (long[] toShape : new long[][]{{3, 4 * 5}, {3 * 4, 5}, {1, 3 * 4 * 5}, {3 * 4 * 5, 1}}) {
        for (char order : new char[]{'c', 'f'}) {
            INDArray inArr = Nd4j.rand(DataType.DOUBLE, origShape, order).muli(100);

            SameDiff sd = SameDiff.create();
            SDVariable in = sd.var("in", inArr);
            SDVariable reshape = sd.reshape(in, toShape);
            // Using stdev here: mean/sum would backprop the same gradient for each input
            SDVariable stdev = sd.standardDeviation("out", reshape, true);

            INDArray out = stdev.eval();
            INDArray expOut = in.getArr().std(true, Integer.MAX_VALUE);

            String msg = "toShape=" + Arrays.toString(toShape) + ", order=" + order;
            TestCase tc = new TestCase(sd);
            tc.testName(msg)
                    .expectedOutput("out", expOut);

            String error = OpValidation.validate(tc);
            if (error != null) {
                failed.add(error);
            }
        }
    }
    assertEquals(failed.toString(), 0, failed.size());
}
Example 13
Source File: LoadTensorFlowMNISTMLP.java From dl4j-tutorials with MIT License
public static void main(String[] args) throws Exception {
    final String FROZEN_MLP = new ClassPathResource(BASE_DIR + "/frozen_model.pb").getFile().getPath();

    // Load placeholder inputs and corresponding predictions generated from tensorflow
    Map<String, INDArray> inputsPredictions = readPlaceholdersAndPredictions();

    // Load the graph into samediff
    SameDiff graph = TFGraphMapper.getInstance().importGraph(new File(FROZEN_MLP));

    // libnd4j executor: run with the input_a array, expecting prediction_a
    graph.associateArrayWithVariable(inputsPredictions.get("input_a"), graph.variableMap().get("input"));
    NativeGraphExecutioner executioner = new NativeGraphExecutioner();
    INDArray[] results = executioner.executeGraph(graph); // returns an array of the outputs
    INDArray libnd4jPred = results[0];
    System.out.println("LIBND4J exec prediction for input_a:\n" + libnd4jPred);
    if (libnd4jPred.equals(inputsPredictions.get("prediction_a"))) {
        System.out.println("Predictions are equal to tensorflow");
    } else {
        throw new RuntimeException("Predictions don't match!");
    }

    // Now run with the samediff executor, with the input_b array, expecting prediction_b.
    // Reimport the graph here; necessary for the 1.0 alpha release.
    SameDiff graphSD = TFGraphMapper.getInstance().importGraph(new File(FROZEN_MLP));
    graphSD.associateArrayWithVariable(inputsPredictions.get("input_b"), graphSD.variableMap().get("input"));
    INDArray samediffPred = graphSD.execAndEndResult();
    System.out.println("SameDiff exec prediction for input_b:\n" + samediffPred);
    if (samediffPred.equals(inputsPredictions.get("prediction_b"))) {
        System.out.println("Predictions are equal to tensorflow");
    }

    // Add an op to the imported graph to demonstrate PyTorch-like capability
    System.out.println("Adding new op to graph..");
    SDVariable linspaceConstant = graphSD.var("linspace", Nd4j.linspace(1, 10, 10));
    SDVariable totalOutput = graphSD.getVariable("output").add(linspaceConstant);
    INDArray totalOutputArr = totalOutput.eval();
    System.out.println(totalOutputArr);
}
Example 14
Source File: MiscOpValidation.java From deeplearning4j with Apache License 2.0
@Test
public void testShapeFn2() {
    INDArray i = Nd4j.create(1, 3);
    SameDiff sd = SameDiff.create();
    SDVariable var = sd.var("in", i);
    SDVariable shape = sd.shape(var);
    SDVariable sum = shape.castTo(DataType.DOUBLE).sum();
    sum.eval();
}
Example 15
Source File: NDLossTest.java From deeplearning4j with Apache License 2.0
@Test
public void testMeanSquaredError() {
    SameDiff sd = SameDiff.create();
    int nOut = 4;
    int minibatch = 10;
    SDVariable predictions = sd.var("in", DataType.DOUBLE, minibatch, nOut);
    SDVariable labels = sd.var("labels", DataType.DOUBLE, -1, nOut);

    INDArray wArr = Nd4j.create(new double[][]{
            {0, 0, 0, 0}, {0, 0, 1, 1}, {1, 1, 0, 0}, {1, 1, 1, 1}, {1, 1, 1, 1},
            {2, 2, 2, 2}, {2, 2, 2, 2}, {2, 2, 2, 2}, {2, 2, 2, 2}, {2, 2, 2, 2}});
    SDVariable w = sd.var("weights", wArr);
    LossReduce reduction = LossReduce.MEAN_BY_NONZERO_WEIGHT_COUNT;

    INDArray predictionsArr = Nd4j.randn(DataType.DOUBLE, minibatch, nOut);
    INDArray labelsArr = Nd4j.randn(DataType.DOUBLE, minibatch, nOut);

    SDVariable loss = sd.loss().meanSquaredError("loss", labels, predictions, w, reduction);
    SDVariable loss2 = sd.loss().meanSquaredError("loss2", labels, predictions, null, reduction);
    sd.associateArrayWithVariable(predictionsArr, predictions);
    sd.associateArrayWithVariable(labelsArr, labels);

    INDArray y_exp = loss.eval();
    INDArray y_exp2 = loss2.eval();

    INDArray y = Nd4j.loss().meanSquaredError(labelsArr, predictionsArr, wArr, reduction);
    INDArray y2 = Nd4j.loss().meanSquaredError(labelsArr, predictionsArr, null, reduction);
    assertEquals(y_exp, y);
    assertEquals(y_exp2, y2);
}
Example 16
Source File: NDLossTest.java From deeplearning4j with Apache License 2.0
@Test
public void testLogPoisson() {
    SameDiff sd = SameDiff.create();
    int nOut = 4;
    int minibatch = 10;
    SDVariable predictions = sd.var("in", DataType.DOUBLE, minibatch, nOut);
    SDVariable labels = sd.var("labels", DataType.DOUBLE, -1, nOut);

    INDArray wArr = Nd4j.create(new double[][]{
            {0, 0, 0, 0}, {0, 0, 1, 1}, {1, 1, 0, 0}, {1, 1, 1, 1}, {1, 1, 1, 1},
            {2, 2, 2, 2}, {2, 2, 2, 2}, {2, 2, 2, 2}, {2, 2, 2, 2}, {2, 2, 2, 2}});
    SDVariable w = sd.var("weights", wArr);
    LossReduce reduction = LossReduce.MEAN_BY_NONZERO_WEIGHT_COUNT;

    INDArray predictionsArr = Nd4j.randn(DataType.DOUBLE, minibatch, nOut);
    INDArray labelsArr = Nd4j.randn(DataType.DOUBLE, minibatch, nOut);

    SDVariable loss = sd.loss().logPoisson("loss", labels, predictions, w, reduction, false);
    SDVariable loss2 = sd.loss().logPoisson("loss2", labels, predictions, null, reduction, false);
    sd.associateArrayWithVariable(predictionsArr, predictions);
    sd.associateArrayWithVariable(labelsArr, labels);

    INDArray y_exp = loss.eval();
    INDArray y_exp2 = loss2.eval();

    INDArray y = Nd4j.loss().logPoisson(labelsArr, predictionsArr, wArr, reduction, false);
    INDArray y2 = Nd4j.loss().logPoisson(labelsArr, predictionsArr, null, reduction, false);
    assertEquals(y_exp, y);
    assertEquals(y_exp2, y2);
}
Example 17
Source File: LayerOpValidation.java From deeplearning4j with Apache License 2.0
@Test
public void testDepthWiseConv2dBasic() {
    int nIn = 3;
    int depthWise = 4;
    int kH = 2;
    int kW = 2;
    int mb = 3;
    int imgH = 28;
    int imgW = 28;

    SameDiff sd = SameDiff.create();
    INDArray depthWeightArr = Nd4j.create(kH, kW, nIn, depthWise);
    INDArray bArr = Nd4j.create(1, depthWise * nIn);
    INDArray inArr = Nd4j.create(mb, nIn, imgH, imgW);

    SDVariable in = sd.var("in", inArr);
    SDVariable dW = sd.var("dW", depthWeightArr);
    SDVariable b = sd.var("b", bArr);

    Conv2DConfig c = Conv2DConfig.builder()
            .kH(kH).kW(kW)
            .pH(0).pW(0)
            .sH(1).sW(1)
            .dH(1).dW(1)
            .isSameMode(false)
            .build();

    SDVariable out = sd.cnn().separableConv2d(in, dW, null, b, c);
    out = sd.nn().tanh("out", out);

    INDArray outArr = out.eval();
    // Expected output size: out = (in - k + 2*p)/s + 1 = (28 - 2 + 0)/1 + 1 = 27
    val outShape = outArr.shape();
    assertArrayEquals(new long[]{mb, depthWise * nIn, 27, 27}, outShape);
}
Example 18
Source File: ReductionOpValidation.java From deeplearning4j with Apache License 2.0
@Test
public void testReduce3_2() {
    Nd4j.getRandom().setSeed(12345);

    int d0 = 3;
    int d1 = 4;
    int d2 = 5;

    for (int[] reduceDims : new int[][]{{Integer.MAX_VALUE}, {0, 1, 2}, {0}, {1}, {2}, {0, 1}, {0, 2}, {1, 2}}) {
        for (int i = 0; i < 6; i++) {
            SameDiff sd = SameDiff.create();
            sd.setLogExecution(false);

            INDArray a = Nd4j.rand(DataType.DOUBLE, d0, d1, d2);
            INDArray b = Nd4j.rand(DataType.DOUBLE, d0, d1, d2);

            SDVariable in = sd.var("in", a);
            SDVariable in2 = sd.var("in2", b);

            INDArray expOut;
            SDVariable reduced;
            String name;
            switch (i) {
                case 0:
                    reduced = sd.math().manhattanDistance(in, in2, reduceDims);
                    name = "manhattan";
                    expOut = Nd4j.getExecutioner().exec(new ManhattanDistance(a, b, null, false, reduceDims));
                    break;
                case 1:
                    reduced = sd.math().euclideanDistance(in, in2, reduceDims);
                    name = "euclidean";
                    expOut = Nd4j.getExecutioner().exec(new EuclideanDistance(a, b, null, false, reduceDims));
                    break;
                case 2:
                    reduced = sd.math().cosineSimilarity(in, in2, reduceDims);
                    name = "cosine";
                    expOut = Nd4j.getExecutioner().exec(new CosineSimilarity(a, b, null, false, reduceDims));
                    break;
                case 3:
                    reduced = sd.math().jaccardDistance(in, in2, reduceDims);
                    name = "jaccard";
                    expOut = Nd4j.getExecutioner().exec(new JaccardDistance(a, b, null, false, reduceDims));
                    break;
                case 4:
                    reduced = sd.math().hammingDistance(in, in2, reduceDims);
                    name = "hamming";
                    expOut = Nd4j.getExecutioner().exec(new HammingDistance(a, b, null, false, reduceDims));
                    break;
                case 5:
                    reduced = sd.math().cosineDistance(in, in2, reduceDims);
                    name = "cosineDistance";
                    expOut = Nd4j.getExecutioner().exec(new CosineDistance(a, b, null, false, reduceDims));
                    break;
                default:
                    throw new RuntimeException();
            }

            long[] expShape;
            if (Arrays.equals(new int[]{0}, reduceDims)) {
                expShape = new long[]{4, 5};
            } else if (Arrays.equals(new int[]{1}, reduceDims)) {
                expShape = new long[]{3, 5};
            } else if (Arrays.equals(new int[]{2}, reduceDims)) {
                expShape = new long[]{3, 4};
            } else if (Arrays.equals(new int[]{Integer.MAX_VALUE}, reduceDims)) {
                expShape = new long[]{};
            } else if (Arrays.equals(new int[]{0, 1}, reduceDims)) {
                expShape = new long[]{5};
            } else if (Arrays.equals(new int[]{0, 2}, reduceDims)) {
                expShape = new long[]{4};
            } else if (Arrays.equals(new int[]{1, 2}, reduceDims)) {
                expShape = new long[]{3};
            } else if (Arrays.equals(new int[]{0, 1, 2}, reduceDims)) {
                expShape = new long[]{};
            } else {
                throw new RuntimeException();
            }

            String msg = name + " - dims=" + Arrays.toString(reduceDims);
            INDArray out = reduced.eval();
            log.info(msg + " - expected shape: " + Arrays.toString(expShape) + ", out="
                    + Arrays.toString(out.shape()) + ", outExp=" + Arrays.toString(expOut.shape()));

            assertArrayEquals(msg, expShape, out.shape());
            assertArrayEquals(msg, expShape, expOut.shape());
            assertEquals(msg, expOut, out);
        }
    }
}
Example 19
Source File: TensorMmul.java From deeplearning4j with Apache License 2.0
private SDVariable doTensorMmul(SDVariable a, SDVariable b, int[][] axes) {
    int validationLength = Math.min(axes[0].length, axes[1].length);
    INDArray aArray = a.eval();
    INDArray bArray = b.eval();
    for (int i = 0; i < validationLength; i++) {
        if (aArray.shape()[axes[0][i]] != bArray.shape()[axes[1][i]])
            throw new IllegalArgumentException("Size of the given axes at each dimension must be the same size.");
        if (axes[0][i] < 0)
            axes[0][i] += aArray.shape().length;
        if (axes[1][i] < 0)
            axes[1][i] += bArray.shape().length;
    }

    List<Integer> listA = new ArrayList<>();
    for (int i = 0; i < aArray.shape().length; i++) {
        if (!Ints.contains(axes[0], i))
            listA.add(i);
    }
    int[] newAxesA = Ints.concat(Ints.toArray(listA), axes[0]);

    List<Integer> listB = new ArrayList<>();
    for (int i = 0; i < bArray.shape().length; i++) {
        if (!Ints.contains(axes[1], i))
            listB.add(i);
    }
    int[] newAxesB = Ints.concat(axes[1], Ints.toArray(listB));

    int n2 = 1;
    int aLength = Math.min(aArray.shape().length, axes[0].length);
    for (int i = 0; i < aLength; i++) {
        n2 *= aArray.shape()[axes[0][i]];
    }

    // If listA (or listB, below) is empty, the old shape defaults to {1};
    // otherwise it is overridden with the sizes of the non-contracted axes.
    long[] newShapeA = {-1, n2};
    long[] oldShapeA;
    if (listA.size() == 0) {
        oldShapeA = new long[]{1};
    } else {
        oldShapeA = Longs.toArray(listA);
        for (int i = 0; i < oldShapeA.length; i++)
            oldShapeA[i] = aArray.shape()[(int) oldShapeA[i]];
    }

    int n3 = 1;
    int bNax = Math.min(bArray.shape().length, axes[1].length);
    for (int i = 0; i < bNax; i++) {
        n3 *= bArray.shape()[axes[1][i]];
    }

    long[] newShapeB = {n3, -1};
    long[] oldShapeB;
    if (listB.size() == 0) {
        oldShapeB = new long[]{1};
    } else {
        oldShapeB = Longs.toArray(listB);
        for (int i = 0; i < oldShapeB.length; i++)
            oldShapeB[i] = bArray.shape()[(int) oldShapeB[i]];
    }

    // Contract by permuting the contraction axes to the edges, reshaping to 2D,
    // doing a matrix multiply, and reshaping back to the remaining axes
    SDVariable at = sameDiff.reshape(sameDiff.permute(a, newAxesA), newShapeA);
    SDVariable bt = sameDiff.reshape(sameDiff.permute(b, newAxesB), newShapeB);
    SDVariable ret = sameDiff.mmul(at, bt);
    long[] aPlusB = Longs.concat(oldShapeA, oldShapeB);
    return sameDiff.reshape(ret, aPlusB);
}
Example 20
Source File: LayerOpValidation.java From deeplearning4j with Apache License 2.0
@Test
public void testDeconv2dBasic() {
    int nIn = 2;
    int nOut = 3;
    int kH = 2;
    int kW = 2;
    int mb = 2;
    int imgH = 8;
    int imgW = 8;

    SameDiff sd = SameDiff.create();
    INDArray wArr = Nd4j.rand(new int[]{kH, kW, nOut, nIn}); // Libnd4j expected weights format: [kH, kW, cOut, cIn]
    INDArray bArr = Nd4j.rand(new long[]{nOut});
    INDArray inArr = Nd4j.rand(new long[]{mb, nIn, imgH, imgW});

    SDVariable in = sd.var("in", inArr);
    SDVariable w = sd.var("W", wArr);
    SDVariable b = sd.var("b", bArr);

    DeConv2DConfig deconv = DeConv2DConfig.builder()
            .kH(kH).kW(kW)
            .pH(0).pW(0)
            .sH(1).sW(1)
            .dH(1).dW(1)
            .isSameMode(false)
            .build();

    SDVariable out = sd.cnn().deconv2d(in, w, b, deconv);
    out = sd.nn().tanh("out", out);

    INDArray outArr = out.eval();
    // Expected deconv output size: out = s*(in - 1) + k - 2*p = 1*(8 - 1) + 2 - 0 = 9
    val outShape = outArr.shape();
    assertArrayEquals(new long[]{mb, nOut, 9, 9}, outShape);

    SDVariable loss = out.std(true);

    // Gradient check:
    TestCase tc = new TestCase(sd).gradientCheck(true);
    String err = OpValidation.validate(tc);
    assertNull(err);
}