Java Code Examples for org.nd4j.autodiff.samediff.SameDiff#constant()
The following examples show how to use org.nd4j.autodiff.samediff.SameDiff#constant().
All examples are drawn from the deeplearning4j project; the original source file is noted above each example.
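Before the examples, a minimal self-contained sketch of the pattern they all share: constant() places a fixed, non-trainable array (here, gather indices) into the graph, while var() creates a trainable variable. The class name and array shapes below are illustrative, not taken from any of the source files that follow.

import org.nd4j.autodiff.samediff.SDVariable;
import org.nd4j.autodiff.samediff.SameDiff;
import org.nd4j.linalg.api.buffer.DataType;
import org.nd4j.linalg.api.ndarray.INDArray;
import org.nd4j.linalg.factory.Nd4j;

public class ConstantExample {
    public static void main(String[] args) {
        SameDiff sd = SameDiff.create();

        // constant(): a fixed, non-trainable array in the graph (named form)
        SDVariable indices = sd.constant("indices", Nd4j.createFromArray(0, 3, 7));

        // var(): a trainable variable, shown for contrast
        SDVariable in = sd.var("in", Nd4j.rand(DataType.DOUBLE, 10, 10));

        // Gather rows 0, 3 and 7 of 'in' along dimension 0
        SDVariable gathered = sd.gather(in, indices, 0);

        INDArray out = gathered.eval();
        System.out.println(java.util.Arrays.toString(out.shape()));  // [3, 10]
    }
}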
Example 1
Source File: ShapeOpValidation.java From deeplearning4j with Apache License 2.0
@Test
public void testGatherNdSingle() {
    SameDiff sameDiff = SameDiff.create();
    INDArray arr1 = Transforms.sigmoid(Nd4j.linspace(DataType.DOUBLE, 1, 24, 24)).reshape(2, 3, 4);
    INDArray arr2 = Nd4j.create(new float[]{1, 2, 3, 0, 1, 3, 1, 0, 2}, new long[]{3, 3}).castTo(DataType.INT);
    SDVariable x = sameDiff.var("x", arr1);
    SDVariable idxs = sameDiff.constant("idxs", arr2);
    SDVariable result = sameDiff.gatherNd(x, idxs);

    // build expected output array
    INDArray expected = Nd4j.zeros(3);
    for (int i = 0; i < 3; i++) {
        INDArray idx = arr2.get(point(i), NDArrayIndex.all());
        expected.putScalar(i, arr1.get(point(idx.getInt(0)), point(idx.getInt(1)), point(idx.getInt(2))).getDouble(0));
    }

    assertEquals(expected, result.eval());
}
Example 2
Source File: ShapeOpValidation.java From deeplearning4j with Apache License 2.0
@Test
public void testGather2() {
    SameDiff sd = SameDiff.create();
    SDVariable input = sd.var("in", Nd4j.arange(6).castTo(DataType.FLOAT).reshape(2, 3));
    SDVariable indices = sd.constant("indices", Nd4j.createFromArray(0));

    SDVariable gathered = sd.gather(input, indices, 1);
    SDVariable loss = gathered.std(true);

    sd.output((Map<String, INDArray>) null, gathered.name());
    sd.setLossVariables(gathered.name());

    String err = OpValidation.validate(new TestCase(sd)
            .gradCheckEpsilon(1e-3)
            .gradCheckMaxRelativeError(1e-4));
    assertNull(err);
}
Example 3
Source File: ShapeOpValidation.java From deeplearning4j with Apache License 2.0
@Test
public void testConstant() {
    //OpValidationSuite.ignoreFailing();

    //Case 0: no shape
    SameDiff sd = SameDiff.create();
    INDArray ia = Nd4j.create(new double[]{1, 2, 3});
    SDVariable in = sd.var(ia);
    SDVariable loss = in.std(true);

    assertNull(OpValidation.validate(new TestCase(sd).expected(in, ia)));

    //Case 1: shape is provided + scalar
    sd = SameDiff.create();
    ia = Nd4j.scalar(3.0);
    in = sd.var(ia);
    SDVariable constant = sd.constant(Nd4j.create(DataType.FLOAT, 3, 4, 5));
    INDArray exp = Nd4j.valueArrayOf(new long[]{3, 4, 5}, 3.0);
    loss = constant.std(true);

    assertNull(OpValidation.validate(new TestCase(sd)
            .gradientCheck(false)
            .expected(constant, Nd4j.create(DataType.FLOAT, 3, 4, 5))));
}
Example 4
Source File: MiscOpValidation.java From deeplearning4j with Apache License 2.0
@Test
public void testOneHot4() {
    INDArray indicesArr = Nd4j.createFromArray(0, 2, -1, 1);

    SameDiff sd = SameDiff.create();
    SDVariable indices = sd.constant("indices", indicesArr);
    int depth = 3;
    int axis = -1;
    SDVariable oneHot = sd.oneHot("oneHot", indices, depth, axis, 5.0, 0.0, DataType.INT32);

    INDArray exp = Nd4j.create(new int[][]{{5, 0, 0}, {0, 0, 5}, {0, 0, 0}, {0, 5, 0}});

    String err = OpValidation.validate(new TestCase(sd)
            .expected(oneHot, exp)
            .gradientCheck(false));

    assertNull(err);
}
Example 5
Source File: MiscOpValidation.java From deeplearning4j with Apache License 2.0
@Test
public void testOneHot2() {
    INDArray indicesArr = Nd4j.createFromArray(0, 2, -1, 1);

    SameDiff sd = SameDiff.create();
    SDVariable indices = sd.constant("indices", indicesArr);
    int depth = 3;
    int axis = -1;
    SDVariable oneHot = sd.oneHot("oneHot", indices, depth, axis, 5.0, 0.0, DataType.DOUBLE);

    INDArray exp = Nd4j.create(new double[][]{{5, 0, 0}, {0, 0, 5}, {0, 0, 0}, {0, 5, 0}});

    String err = OpValidation.validate(new TestCase(sd)
            .expected(oneHot, exp)
            .gradientCheck(false));

    assertNull(err);
}
Example 6
Source File: ShapeOpValidation.java From deeplearning4j with Apache License 2.0
@Test
public void testPermute3() {
    INDArray in = Nd4j.linspace(DataType.FLOAT, 1, 6, 1).reshape(3, 2);
    INDArray permute = Nd4j.createFromArray(1, 0);

    SameDiff sd = SameDiff.create();
    SDVariable v = sd.var(in);
    SDVariable v2 = sd.constant(permute);

    SDVariable out = v.permute(v2);

    INDArray exp = in.transpose();
    INDArray outArr = out.eval();
    assertEquals(exp, outArr);
}
Example 7
Source File: ShapeOpValidation.java From deeplearning4j with Apache License 2.0
@Test
public void testSequenceMask() {
    SameDiff sameDiff = SameDiff.create();
    INDArray arr = Nd4j.createFromArray(new int[]{1, 3, 2});
    // arr is not trainable, so it's a constant in the model
    SDVariable lengths = sameDiff.constant(arr);

    // Test with static max len
    int maxlen = 5;
    INDArray expected = Nd4j.create(new float[]{
            1.f, 0.f, 0.f, 0.f, 0.f,
            1.f, 1.f, 1.f, 0.f, 0.f,
            1.f, 1.f, 0.f, 0.f, 0.f
    }).reshape(3, 5);
    INDArray[] ret = Nd4j.exec(new SequenceMask(arr, maxlen, DataType.FLOAT));
    SDVariable result1 = sameDiff.sequenceMask(lengths, maxlen, DataType.FLOAT);
    assertArrayEquals(expected.shape(), result1.eval().shape());
    assertEquals(expected, result1.eval());

    SDVariable loss = sameDiff.standardDeviation(result1, true);

    String err = OpValidation.validate(new TestCase(sameDiff)
            .expected(result1, expected)
            .gradientCheck(false));
    assertNull(err);

    // Test with dynamic maxlen
    lengths = sameDiff.constant("lengths2", arr);
    SDVariable maxLen = sameDiff.constant("maxLen", Nd4j.scalar(5));
    SDVariable result2 = sameDiff.sequenceMask(lengths, maxLen, DataType.FLOAT);
    // assertArrayEquals(expected.shape(), result2.eval().shape());
    assertEquals(expected, result2.eval());
}
Example 8
Source File: TransformOpValidation.java From deeplearning4j with Apache License 2.0
@Test
public void testImageResize() {
    //TODO: Methods that failed: ResizeLanczos5, ResizeMitchellcubic, ResizeArea
    for (ImageResizeMethod method : ImageResizeMethod.values()) {
        if (method == ImageResizeMethod.ResizeLanczos5 || method == ImageResizeMethod.ResizeArea || method == ImageResizeMethod.ResizeMitchellcubic) {
            continue;
        }

        log.info("Trying {}", method);
        Nd4j.getRandom().setSeed(12345);
        SameDiff sd = SameDiff.create();
        boolean preserveAspectRatio = true;
        boolean antialias = true;
        SDVariable inputImage = sd.var(Nd4j.rand(DataType.FLOAT, 1, 5, 5, 3));  // NHWC format
        long[] expectedShape = new long[]{1, 3, 3, 3};
        SDVariable requestedSize = sd.constant(Nd4j.createFromArray(new long[]{3, 3}));

        Function<INDArray, String> checkFunction = in -> {
            boolean shapeOk = Arrays.equals(expectedShape, in.shape());
            if (shapeOk) return null;
            return "Failed: shape differs - expected " + Arrays.toString(expectedShape) + " vs "
                    + Arrays.toString(in.shape()) + " on method " + method;
        };

        SDVariable out = new ImageResize(sd, inputImage, requestedSize, preserveAspectRatio, antialias, method)
                .outputVariable().std(true);
        String err = OpValidation.validate(new TestCase(sd)
                .gradientCheck(false)
                .expected("image_resize", checkFunction));
        assertNull(err);
    }
}
Example 9
Source File: MiscOpValidation.java From deeplearning4j with Apache License 2.0
@Test
public void testConfusionMatrix() {
    DataType dt = DataType.DOUBLE;

    for (boolean withMax : new boolean[]{true, false}) {
        SameDiff sd = SameDiff.create();

        SDVariable labels = sd.constant("labels", Nd4j.createFromArray(1, 2, 4));
        SDVariable predictions = sd.constant("predictions", Nd4j.createFromArray(2, 2, 4));

        INDArray exp = Nd4j.create(new double[][]{
                {0, 0, 0, 0, 0},
                {0, 0, 1, 0, 0},
                {0, 0, 1, 0, 0},
                {0, 0, 0, 0, 0},
                {0, 0, 0, 0, 1}}).castTo(DataType.FLOAT);

        SDVariable confMatrix;
        if (withMax) {
            confMatrix = sd.math().confusionMatrix(labels, predictions, 5).castTo(DataType.FLOAT);
        } else {
            confMatrix = sd.math().confusionMatrix("cm", labels, predictions, DataType.FLOAT);
        }

        SDVariable loss = confMatrix.castTo(DataType.DOUBLE).std(true);

        String err = OpValidation.validate(new TestCase(sd)
                .gradientCheck(false)  //Not gradient checkable
                .expected(confMatrix, exp));
        assertNull(err);
    }
}
Example 10
Source File: MiscOpValidation.java From deeplearning4j with Apache License 2.0
@Test
public void testOneHot3() {
    //https://github.com/deeplearning4j/deeplearning4j/issues/6872
    //https://www.tensorflow.org/api_docs/python/tf/one_hot
    //indices = [[0, 2], [1, -1]]
    INDArray indicesArr = Nd4j.create(new double[][]{{0, 2}, {1, -1}}).castTo(DataType.INT);
    INDArray expectedOut = Nd4j.zeros(DataType.DOUBLE, 2, 2, 3);
    /*
    # output: [2 x 2 x 3]
    # [[[1.0, 0.0, 0.0],   # one_hot(0)
    #   [0.0, 0.0, 1.0]],  # one_hot(2)
    #  [[0.0, 1.0, 0.0],   # one_hot(1)
    #   [0.0, 0.0, 0.0]]]  # one_hot(-1)
    */
    expectedOut.putScalar(0, 0, 0, 1.0);
    expectedOut.putScalar(0, 1, 2, 1.0);
    expectedOut.putScalar(1, 0, 1, 1.0);

    SameDiff sd = SameDiff.create();
    SDVariable indices = sd.constant("indices", indicesArr);

    int depth = 3;
    int axis = -1;
    SDVariable oneHot = sd.oneHot("oneHot", indices, depth, axis, 1.0, 0.0).castTo(DataType.DOUBLE);

    SDVariable loss = oneHot.std(true);

    String err = OpValidation.validate(new TestCase(sd)
            .expected(oneHot, expectedOut)
            .gradientCheck(false));

    assertNull(err);
}
Example 11
Source File: SpaceToBatch.java From deeplearning4j with Apache License 2.0
public SpaceToBatch(SameDiff sameDiff, SDVariable[] args, int[] blocks, int[][] padding, boolean inPlace) {
    super(null, sameDiff, new SDVariable[]{args[0], sameDiff.constant(Nd4j.createFromArray(padding))}, inPlace);

    this.blocks = blocks;
    this.padding = padding;

    addIArgument(blocks[0]);
}
Example 12
Source File: BatchToSpace.java From deeplearning4j with Apache License 2.0
public BatchToSpace(SameDiff sameDiff, SDVariable[] args, int[] blocks, int[][] crops, boolean inPlace) {
    super(null, sameDiff, new SDVariable[]{args[0], sameDiff.constant(Nd4j.createFromArray(crops))}, inPlace);

    this.blocks = blocks;
    this.crops = crops;

    for (val b : blocks)
        addIArgument(b);
}
Example 13
Source File: ReductionOpValidation.java From deeplearning4j with Apache License 2.0
@Test
public void testDotProductAttentionWithMask() {
    final INDArray keys = Nd4j.rand(new int[]{10, 4, 3});
    final INDArray values = Nd4j.rand(new int[]{10, 4, 3});
    final INDArray query = Nd4j.rand(new int[]{10, 4, 1});
    final INDArray mask = Nd4j.rand(10, 3).gte(0.2).castTo(DataType.DOUBLE);

    final INDArray exec = Nd4j.matmul(keys, query, true, false, false)
            .divi(Math.sqrt(keys.size(1)));
    exec.addi(mask.reshape(10, 3, 1).sub(1).muli(1e9));
    Nd4j.exec((CustomOp) new SoftMax(exec, exec, 1));
    final INDArray finalOut = Nd4j.matmul(values, exec).norm1();

    SameDiff sd = SameDiff.create();
    SDVariable sdQ = sd.var("q", query);
    SDVariable sdK = sd.var("k", keys);
    SDVariable sdV = sd.var("v", values);
    SDVariable sdMask = sd.constant("mask", mask);

    SDVariable t = sd.nn.dotProductAttention(sdQ, sdK, sdV, sdMask, true);
    t.norm1("out");

    String err = OpValidation.validate(new TestCase(sd)
            .expectedOutput("out", finalOut)
            .gradCheckSkipVariables("mask")
            .gradientCheck(true));
    assertNull(err);
}
Example 14
Source File: MiscOpValidation.java From deeplearning4j with Apache License 2.0
@Test
public void testGatherGradient() {
    Nd4j.getRandom().setSeed(12345);

    List<String> failed = new ArrayList<>();

    for (int rank = 2; rank <= 3; rank++) {
        for (int dim = 0; dim < rank; dim++) {
            SameDiff sd = SameDiff.create();

            int[] inShape;
            if (rank == 2) {
                inShape = new int[]{10, 10};
            } else {
                inShape = new int[]{10, 10, 10};
            }

            SDVariable in = sd.var("in", Nd4j.rand(DataType.DOUBLE, inShape));
            SDVariable indices = sd.constant("indices", Nd4j.createFromArray(0, 3, 7));

            INDArray gatherExp = null;
            if (rank == 2) {
                int tadDim = dim == 0 ? 1 : 0;  //Swap: pullRows dim is "tensor along dimension" vs. gather's "index is value for this dimension"
                gatherExp = Nd4j.pullRows(in.getArr(), tadDim, new int[]{0, 3, 7});
            }

            SDVariable gather = sd.gather(in, indices, dim);

            SDVariable loss = sd.standardDeviation("loss", gather, true, Integer.MAX_VALUE);

            String msg = "rank=" + rank + " dim=" + dim;

            TestCase tc = new TestCase(sd)
                    .testName(msg)
                    .gradCheckSkipVariables(indices.name());

            if (gatherExp != null) {
                tc.expected(gather, gatherExp);
            }

            String error = OpValidation.validate(tc);
            if (error != null) {
                failed.add(msg + " - " + error);
            }
        }
    }

    assertEquals(failed.toString(), 0, failed.size());
}
Example 15
Source File: ReductionOpValidation.java From deeplearning4j with Apache License 2.0
@Test
public void testMultiHeadedDotProductAttentionWeirdInputs() {
    final INDArray k = Nd4j.rand(new int[]{10, 4, 5});
    final INDArray v = Nd4j.rand(new int[]{10, 4, 5});
    final INDArray q = Nd4j.rand(new int[]{10, 4, 2});

    final INDArray Wk = Nd4j.rand(new int[]{2, 3, 4});
    final INDArray Wv = Nd4j.rand(new int[]{2, 3, 4});
    final INDArray Wq = Nd4j.rand(new int[]{2, 3, 4});
    final INDArray Wo = Nd4j.rand(new int[]{2 * 3, 8});

    final INDArray mask = Nd4j.rand(10, 5).gte(0.2).castTo(DataType.DOUBLE);

    final INDArray kP = Nd4j.tensorMmul(k, Wk, new int[][]{{1}, {2}}).permutei(0, 2, 3, 1);
    final INDArray vP = Nd4j.tensorMmul(v, Wv, new int[][]{{1}, {2}}).permutei(0, 2, 3, 1);
    final INDArray qP = Nd4j.tensorMmul(q, Wq, new int[][]{{1}, {2}}).permutei(0, 2, 3, 1);

    final DynamicCustomOp dot_product_attention = DynamicCustomOp
            .builder("dot_product_attention")
            .addInputs(qP, kP, vP, mask)
            .addIntegerArguments(1, 0)
            .build();

    final INDArray[] outputs = Nd4j.exec(dot_product_attention);
    final INDArray attOut = outputs[0].permutei(0, 3, 1, 2).reshape(k.size(0), q.size(2), Wv.size(0) * Wv.size(1));

    final INDArray out = Nd4j.tensorMmul(attOut, Wo, new int[][]{{2}, {0}}).permutei(0, 2, 1);
    final INDArray finalOut = out.norm2();

    for (char orderWeights : new char[]{'f', 'c'}) {
        for (char orderInput : new char[]{'f', 'c'}) {
            log.info("-*- Starting Test: input Order = {}, weightOrder = {} -*-", orderInput, orderWeights);

            SameDiff sd = SameDiff.create();
            SDVariable sdQ = sd.var("q", q.dup(orderInput));
            SDVariable sdK = sd.var("k", k.dup(orderInput));
            SDVariable sdV = sd.var("v", v.dup(orderInput));
            SDVariable sdWq = sd.var("Wq", Wq.dup(orderWeights));
            SDVariable sdWk = sd.var("Wk", Wk.dup(orderWeights));
            SDVariable sdWv = sd.var("Wv", Wv.dup(orderWeights));
            SDVariable sdWo = sd.var("Wo", Wo.dup(orderWeights));
            SDVariable sdMask = sd.constant("mask", mask);

            SDVariable t = sd.nn.multiHeadDotProductAttention(sdQ, sdK, sdV, sdWq, sdWk, sdWv, sdWo, sdMask, true);
            t.norm2("out");

            String err = OpValidation.validate(new TestCase(sd)
                    .expectedOutput("out", finalOut)
                    .gradientCheck(false)
                    .gradCheckSkipVariables("mask"));
            assertNull(err);
        }
    }
}
Example 16
Source File: ReductionOpValidation.java From deeplearning4j with Apache License 2.0
@Test
public void testMultiHeadedDotProductAttention() {
    final INDArray k = Nd4j.rand(new int[]{10, 4, 5});
    final INDArray v = Nd4j.rand(new int[]{10, 4, 5});
    final INDArray q = Nd4j.rand(new int[]{10, 4, 2});

    final INDArray Wk = Nd4j.rand(new int[]{2, 3, 4});
    final INDArray Wv = Nd4j.rand(new int[]{2, 3, 4});
    final INDArray Wq = Nd4j.rand(new int[]{2, 3, 4});
    final INDArray Wo = Nd4j.rand(new int[]{2 * 3, 8});

    final INDArray kP = Nd4j.tensorMmul(k, Wk, new int[][]{{1}, {2}}).permutei(0, 2, 3, 1);
    final INDArray vP = Nd4j.tensorMmul(v, Wv, new int[][]{{1}, {2}}).permutei(0, 2, 3, 1);
    final INDArray qP = Nd4j.tensorMmul(q, Wq, new int[][]{{1}, {2}}).permutei(0, 2, 3, 1);

    final INDArray mask = Nd4j.rand(10, 5).gte(0.2).castTo(DataType.DOUBLE);

    final DynamicCustomOp dot_product_attention = DynamicCustomOp
            .builder("dot_product_attention")
            .addInputs(qP, kP, vP, mask)
            .addIntegerArguments(1, 0)
            .build();

    final INDArray[] outputs = Nd4j.exec(dot_product_attention);
    final INDArray attOut = outputs[0].permutei(0, 3, 1, 2).reshape(k.size(0), q.size(2), Wv.size(0) * Wv.size(1));

    final INDArray out = Nd4j.tensorMmul(attOut, Wo, new int[][]{{2}, {0}}).permutei(0, 2, 1);
    final INDArray finalOut = out.norm2();

    SameDiff sd = SameDiff.create();
    SDVariable sdQ = sd.var("q", q);
    SDVariable sdK = sd.var("k", k);
    SDVariable sdV = sd.var("v", v);
    SDVariable sdWq = sd.var("Wq", Wq);
    SDVariable sdWk = sd.var("Wk", Wk);
    SDVariable sdWv = sd.var("Wv", Wv);
    SDVariable sdWo = sd.var("Wo", Wo);
    SDVariable sdMask = sd.constant("mask", mask);

    SDVariable t = sd.nn.multiHeadDotProductAttention(sdQ, sdK, sdV, sdWq, sdWk, sdWv, sdWo, sdMask, true);
    t.norm2("out");

    String err = OpValidation.validate(new TestCase(sd)
            .expectedOutput("out", finalOut)
            .gradientCheck(true)
            .gradCheckSkipVariables("mask"));
    assertNull(err);
}
Example 17
Source File: RnnOpValidation.java From deeplearning4j with Apache License 2.0
@Test
public void testGRUCell() {
    Nd4j.getRandom().setSeed(12345);
    int mb = 2;
    int nIn = 3;
    int nOut = 4;

    SameDiff sd = SameDiff.create();
    SDVariable x = sd.constant(Nd4j.rand(DataType.FLOAT, mb, nIn));
    SDVariable hLast = sd.constant(Nd4j.rand(DataType.FLOAT, mb, nOut));
    SDVariable Wru = sd.constant(Nd4j.rand(DataType.FLOAT, (nIn + nOut), 2 * nOut));
    SDVariable Wc = sd.constant(Nd4j.rand(DataType.FLOAT, (nIn + nOut), nOut));
    SDVariable bru = sd.constant(Nd4j.rand(DataType.FLOAT, 2 * nOut));
    SDVariable bc = sd.constant(Nd4j.rand(DataType.FLOAT, nOut));

    double fb = 1.0;
    GRUWeights weights = GRUWeights.builder()
            .ruWeight(Wru)
            .cWeight(Wc)
            .ruBias(bru)
            .cBias(bc)
            .build();

    SDVariable[] v = sd.rnn().gruCell(x, hLast, weights);
    List<String> toExec = new ArrayList<>();
    for (SDVariable sdv : v) {
        toExec.add(sdv.name());
    }

    //Test forward pass:
    Map<String, INDArray> m = sd.output(null, toExec);

    //Weights and bias order: [r, u], [c]

    //Reset gate:
    INDArray wr_x = Wru.getArr().get(NDArrayIndex.interval(0, nIn), NDArrayIndex.interval(0, nOut));           //Input weights
    INDArray wr_r = Wru.getArr().get(NDArrayIndex.interval(nIn, nIn + nOut), NDArrayIndex.interval(0, nOut));  //Recurrent weights
    INDArray br = bru.getArr().get(NDArrayIndex.interval(0, nOut));

    INDArray rExp = x.getArr().mmul(wr_x).addiRowVector(br);  //[mb,nIn]*[nIn, nOut] + [nOut]
    rExp.addi(hLast.getArr().mmul(wr_r));                     //[mb,nOut]*[nOut,nOut]
    Transforms.sigmoid(rExp, false);

    INDArray rAct = m.get(toExec.get(0));
    assertEquals(rExp, rAct);

    //Update gate:
    INDArray wu_x = Wru.getArr().get(NDArrayIndex.interval(0, nIn), NDArrayIndex.interval(nOut, 2 * nOut));           //Input weights
    INDArray wu_r = Wru.getArr().get(NDArrayIndex.interval(nIn, nIn + nOut), NDArrayIndex.interval(nOut, 2 * nOut));  //Recurrent weights
    INDArray bu = bru.getArr().get(NDArrayIndex.interval(nOut, 2 * nOut));

    INDArray uExp = x.getArr().mmul(wu_x).addiRowVector(bu);  //[mb,nIn]*[nIn, nOut] + [nOut]
    uExp.addi(hLast.getArr().mmul(wu_r));                     //[mb,nOut]*[nOut,nOut]
    Transforms.sigmoid(uExp, false);

    INDArray uAct = m.get(toExec.get(1));
    assertEquals(uExp, uAct);

    //c = tanh(x * Wcx + Wcr * (hLast .* r))
    INDArray Wcx = Wc.getArr().get(NDArrayIndex.interval(0, nIn), NDArrayIndex.all());
    INDArray Wcr = Wc.getArr().get(NDArrayIndex.interval(nIn, nIn + nOut), NDArrayIndex.all());
    INDArray cExp = x.getArr().mmul(Wcx);
    cExp.addi(hLast.getArr().mul(rExp).mmul(Wcr));
    cExp.addiRowVector(bc.getArr());
    Transforms.tanh(cExp, false);
    assertEquals(cExp, m.get(toExec.get(2)));

    //h = u * hLast + (1-u) * c
    INDArray hExp = uExp.mul(hLast.getArr()).add(uExp.rsub(1.0).mul(cExp));
    assertEquals(hExp, m.get(toExec.get(3)));
}
Example 18
Source File: ShapeOpValidation.java From deeplearning4j with Apache License 2.0
@Test
public void testReverseSequence() {
    SameDiff sameDiff = SameDiff.create();
    float[] input_data = new float[]{
            1, 2, 3,
            4, 5, 6,
            7, 8, 9,
            0, 0, 0,
            0, 0, 0,

            1, 2, 3,
            4, 5, 6,
            0, 0, 0,
            0, 0, 0,
            0, 0, 0
    };
    float[] expected_output = new float[]{
            7, 8, 9,
            4, 5, 6,
            1, 2, 3,
            0, 0, 0,
            0, 0, 0,

            4, 5, 6,
            1, 2, 3,
            0, 0, 0,
            0, 0, 0,
            0, 0, 0
    };
    INDArray arr1 = Nd4j.create(input_data, new long[]{2, 5, 3}).castTo(DataType.DOUBLE);
    INDArray seqLenArr = Nd4j.createFromArray(3, 2);
    SDVariable x = sameDiff.constant("x", arr1);
    SDVariable seq_lengths = sameDiff.constant("seq_lengths", seqLenArr);
    SDVariable result = sameDiff.reverseSequence(x, seq_lengths, 1, 0);
    INDArray expected = Nd4j.create(expected_output, new long[]{2, 5, 3}).castTo(DataType.DOUBLE);
    assertArrayEquals(arr1.shape(), result.eval().shape());
    assertEquals(expected, result.eval());

    SDVariable loss = sameDiff.standardDeviation(result, true);
    String err = OpValidation.validate(new TestCase(sameDiff)
            .expected(result.name(), expected)
            .gradientCheck(false));
    assertNull(err);
}
Example 19
Source File: Linspace.java From deeplearning4j with Apache License 2.0
public Linspace(SameDiff sameDiff, DataType dataType, double start, double stop, long number) {
    this(sameDiff, sameDiff.constant(start), sameDiff.constant(stop), sameDiff.constant(number), dataType);
}
Example 20
Source File: RnnOpValidation.java From deeplearning4j with Apache License 2.0
@Test
public void testRnnBlockCellManualTFCompare() {
    //Test case: "rnn/lstmblockcell/static_batch1_n3-2_tsLength1_noPH_noClip_fBias1_noIS"

    SameDiff sd = SameDiff.create();
    INDArray zero2d = Nd4j.createFromArray(new float[][]{{0, 0}});
    INDArray zero1d = Nd4j.createFromArray(new float[]{0, 0});

    SDVariable x = sd.constant(Nd4j.createFromArray(new float[][]{{0.7787856f, 0.80119777f, 0.72437465f}}));
    SDVariable cLast = sd.constant(zero2d);
    SDVariable yLast = sd.constant(zero2d);
    //Weights shape: [(nIn+nOut), 4*nOut]
    SDVariable W = sd.constant(Nd4j.createFromArray(
            -0.61977, -0.5708851, -0.38089648, -0.07994056, -0.31706482, 0.21500933, -0.35454142, -0.3239095, -0.3177906,
            0.39918554, -0.3115911, 0.540841, 0.38552666, 0.34270835, -0.63456273, -0.13917702, -0.2985368, 0.343238,
            -0.3178353, 0.017154932, -0.060259163, 0.28841054, -0.6257687, 0.65097713, 0.24375653, -0.22315514, 0.2033832,
            0.24894875, -0.2062299, -0.2242794, -0.3809483, -0.023048997, -0.036284804, -0.46398938, -0.33979666, 0.67012596,
            -0.42168984, 0.34208286, -0.0456419, 0.39803517).castTo(DataType.FLOAT).reshape(5, 8));
    SDVariable Wci = sd.constant(zero1d);
    SDVariable Wcf = sd.constant(zero1d);
    SDVariable Wco = sd.constant(zero1d);
    SDVariable b = sd.constant(Nd4j.zeros(DataType.FLOAT, 8));

    double fb = 1.0;
    LSTMConfiguration conf = LSTMConfiguration.builder()
            .peepHole(false)
            .forgetBias(fb)
            .clippingCellValue(0.0)
            .build();

    LSTMWeights weights = LSTMWeights.builder().weights(W).bias(b)
            .inputPeepholeWeights(Wci).forgetPeepholeWeights(Wcf).outputPeepholeWeights(Wco).build();

    LSTMCellOutputs v = new LSTMCellOutputs(sd.rnn().lstmCell(x, cLast, yLast, weights, conf));  //Output order: i, c, f, o, z, h, y
    List<String> toExec = new ArrayList<>();
    for (SDVariable sdv : v.getAllOutputs()) {
        toExec.add(sdv.name());
    }

    //Test forward pass:
    Map<String, INDArray> m = sd.output(null, toExec);

    INDArray out0 = Nd4j.create(new float[]{0.27817473f, 0.53092605f}, new int[]{1, 2});   //Input mod gate
    INDArray out1 = Nd4j.create(new float[]{-0.18100877f, 0.19417824f}, new int[]{1, 2});  //CS (pre tanh)
    INDArray out2 = Nd4j.create(new float[]{0.73464274f, 0.83901811f}, new int[]{1, 2});   //Forget gate
    INDArray out3 = Nd4j.create(new float[]{0.22481689f, 0.52692068f}, new int[]{1, 2});   //Output gate
    INDArray out4 = Nd4j.create(new float[]{-0.65070170f, 0.36573499f}, new int[]{1, 2});  //block input
    INDArray out5 = Nd4j.create(new float[]{-0.17905743f, 0.19177397f}, new int[]{1, 2});  //Cell state
    INDArray out6 = Nd4j.create(new float[]{-0.04025514f, 0.10104967f}, new int[]{1, 2});  //Output

    // for (int i = 0; i < toExec.size(); i++) {
    //     System.out.println(i + "\t" + m.get(toExec.get(i)));
    // }

    assertEquals(out0, m.get(toExec.get(0)));  //Input modulation gate
    assertEquals(out1, m.get(toExec.get(1)));  //Cell state (pre tanh)
    assertEquals(out2, m.get(toExec.get(2)));  //Forget gate
    assertEquals(out3, m.get(toExec.get(3)));  //Output gate
    assertEquals(out4, m.get(toExec.get(4)));  //block input
    assertEquals(out5, m.get(toExec.get(5)));  //Cell state
    assertEquals(out6, m.get(toExec.get(6)));  //Output
}