Java Code Examples for org.nd4j.autodiff.samediff.SameDiff#mmul()
The following examples show how to use org.nd4j.autodiff.samediff.SameDiff#mmul().
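Before the project examples, here is a minimal, self-contained sketch of how SameDiff#mmul() is typically used. The class name, variable names, and shapes are purely illustrative and are not taken from any of the projects below.

import org.nd4j.autodiff.samediff.SDVariable;
import org.nd4j.autodiff.samediff.SameDiff;
import org.nd4j.linalg.api.ndarray.INDArray;
import org.nd4j.linalg.factory.Nd4j;

public class MmulSketch {   // illustrative class name
    public static void main(String[] args) {
        SameDiff sd = SameDiff.create();

        // Two illustrative variables: a is 2x3, b is 3x4
        SDVariable a = sd.var("a", Nd4j.rand(2, 3));
        SDVariable b = sd.var("b", Nd4j.rand(3, 4));

        // Matrix multiply: the result has shape [2, 4]
        SDVariable c = sd.mmul("c", a, b);

        // Evaluate the graph and print the result
        INDArray out = c.eval();
        System.out.println(out);
    }
}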
Example 1
Source File: TransformOpValidation.java From deeplearning4j with Apache License 2.0
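This test multiplies two rank-5 arrays (a batched matrix multiply over the last two dimensions), verifies the output shape, then builds the same computation as a SameDiff graph via mmul() and runs a gradient check through OpValidation.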
@Test
public void testMatMulTensor() {
    final INDArray a = Nd4j.rand(new int[]{1, 2, 3, 4, 5});
    final INDArray b = Nd4j.rand(new int[]{1, 2, 3, 5, 6});

    final INDArray z = Nd4j.matmul(a, b);

    assertArrayEquals(z.shape(), new long[]{1, 2, 3, 4, 6});

    SameDiff sd = SameDiff.create();
    SDVariable sdA = sd.var("a", a);
    SDVariable sdB = sd.var("b", b);
    SDVariable t = sd.mmul(sdA, sdB);
    t.norm1("out");

    String err = OpValidation.validate(new TestCase(sd)
            .gradientCheck(true));
    assertNull(err, err);
}
Example 2
Source File: SameDiffDenseVertex.java From deeplearning4j with Apache License 2.0
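A dense graph vertex defined with SameDiff: the vertex input is matrix-multiplied with the weight parameter, the bias is added, and the configured activation is applied.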
@Override
public SDVariable defineVertex(SameDiff sameDiff, Map<String, SDVariable> layerInput,
                               Map<String, SDVariable> paramTable, Map<String, SDVariable> maskVars) {
    SDVariable weights = paramTable.get(DefaultParamInitializer.WEIGHT_KEY);
    SDVariable bias = paramTable.get(DefaultParamInitializer.BIAS_KEY);

    SDVariable mmul = sameDiff.mmul("mmul", layerInput.get("in"), weights);
    SDVariable z = mmul.add("z", bias);
    return activation.asSameDiff("out", sameDiff, z);
}
Example 3
Source File: SameDiffDense.java From deeplearning4j with Apache License 2.0
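The defineLayer() implementation of a custom SameDiff dense layer, following the same pattern: mmul() of the layer input with the weights, a bias add, then the activation.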
@Override
public SDVariable defineLayer(SameDiff sd, SDVariable layerInput,
                              Map<String, SDVariable> paramTable, SDVariable mask) {
    SDVariable weights = paramTable.get(DefaultParamInitializer.WEIGHT_KEY);
    SDVariable bias = paramTable.get(DefaultParamInitializer.BIAS_KEY);

    SDVariable mmul = sd.mmul("mmul", layerInput, weights);
    SDVariable z = mmul.add("z", bias);
    return activation.asSameDiff("out", sd, z);
}
Example 4
Source File: MinimalSameDiffDense.java From deeplearning4j with Apache License 2.0
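A minimal SameDiff dense layer with the same structure as the previous example: matrix multiply, bias add, activation.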
@Override
public SDVariable defineLayer(SameDiff sd, SDVariable layerInput,
                              Map<String, SDVariable> paramTable, SDVariable mask) {
    SDVariable weights = paramTable.get(DefaultParamInitializer.WEIGHT_KEY);
    SDVariable bias = paramTable.get(DefaultParamInitializer.BIAS_KEY);

    SDVariable mmul = sd.mmul("mmul", layerInput, weights);
    SDVariable z = mmul.add("z", bias);
    return activation.asSameDiff("out", sd, z);
}
Example 5
Source File: MiscOpValidation.java From deeplearning4j with Apache License 2.0
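This test sweeps every combination of array order ('c'/'f') and the transposeA, transposeB and transposeResult flags of mmul(), compares each SameDiff result against the equivalent INDArray computation, and gradient-checks the graph, collecting any validation failures.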
@Test
public void testMmulGradients(){
    int[] aShape = new int[]{2,3};
    int[] bShape = new int[]{3,4};
    List<String> failed = new ArrayList<>();

    for( char aOrder : new char[]{'c', 'f'}) {
        for (char bOrder : new char[]{'c', 'f'}) {
            for (boolean transposeA : new boolean[]{false, true}) {
                for (boolean transposeB : new boolean[]{false, true}) {
                    for (boolean transposeResult : new boolean[]{false, true}) {    //https://github.com/deeplearning4j/deeplearning4j/issues/5648
                        Nd4j.getRandom().setSeed(12345);

                        //t(...) is a helper defined elsewhere in this test class (returns the shape, swapped when the transpose flag is set)
                        INDArray aArr = Nd4j.rand(DataType.DOUBLE, t(transposeA, aShape)).dup(aOrder);
                        INDArray bArr = Nd4j.rand(DataType.DOUBLE, t(transposeB, bShape)).dup(bOrder);

                        SameDiff sd = SameDiff.create();
                        SDVariable a = sd.var("a", aArr);
                        SDVariable b = sd.var("b", bArr);

                        SDVariable mmul = sd.mmul(a, b, transposeA, transposeB, transposeResult);

                        INDArray exp = (transposeA ? aArr.transpose() : aArr);
                        exp = exp.mmul(transposeB ? bArr.transpose() : bArr);
                        exp = (transposeResult ? exp.transpose() : exp);

                        SDVariable loss = mmul.std(true);

                        String name = aOrder + "," + bOrder + ",tA=" + transposeA + ",tB=" + transposeB + ",tRes=" + transposeResult;
                        TestCase tc = new TestCase(sd).testName(name)
                                .expected(mmul, exp);

                        String err = OpValidation.validate(tc, true);
                        if(err != null)
                            failed.add(err);
                    }
                }
            }
        }
    }

    assertEquals(failed.toString(), 0, failed.size());
}
Example 6
Source File: TransformOpValidation.java From deeplearning4j with Apache License 2.0
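The batched counterpart of Example 1: for every combination of the transposeA, transposeB and transposeResult flags, the m, n, k, l, i, j values select input and expected output shapes consistent with that flag combination, and the resulting SameDiff graph is gradient-checked.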
@Test
public void testMatMulTensorTranspose() {
    for (boolean transposeA : new boolean[]{false, true}) {
        for (boolean transposeB : new boolean[]{false, true}) {
            for (boolean transposeResult : new boolean[]{false, true}) {
                log.info("Testing with transposeA={}; transposeB={}; transposeResult={};",
                        transposeA, transposeB, transposeResult);
                int m = 0, n = 0, k = 0, l = 0, i = 0, j = 0;
                if (!transposeA && !transposeB && !transposeResult) {
                    m = 4; n = 5; k = 5; l = 6; i = 4; j = 6;
                }
                if (!transposeA && transposeB && !transposeResult) {
                    m = 4; n = 5; k = 6; l = 5; i = 4; j = 6;
                }
                if (!transposeA && !transposeB && transposeResult) {
                    m = 4; n = 5; k = 5; l = 6; i = 6; j = 4;
                }
                if (!transposeA && transposeB && transposeResult) {
                    m = 4; n = 5; k = 6; l = 5; i = 6; j = 4;
                }
                if (transposeA && !transposeB && !transposeResult) {
                    m = 5; n = 4; k = 5; l = 6; i = 4; j = 6;
                }
                if (transposeA && transposeB && !transposeResult) {
                    m = 5; n = 4; k = 6; l = 5; i = 4; j = 6;
                }
                if (transposeA && !transposeB && transposeResult) {
                    m = 5; n = 4; k = 5; l = 6; i = 6; j = 4;
                }
                if (transposeA && transposeB && transposeResult) {
                    m = 5; n = 4; k = 6; l = 5; i = 6; j = 4;
                }

                final INDArray a = Nd4j.rand(new int[]{1, 2, 3, m, n});
                final INDArray b = Nd4j.rand(new int[]{1, 2, 3, k, l});

                final INDArray z = Nd4j.matmul(a, b, transposeA, transposeB, transposeResult);

                assertArrayEquals(z.shape(), new long[]{1, 2, 3, i, j});

                SameDiff sd = SameDiff.create();
                SDVariable sdA = sd.var("a", a);
                SDVariable sdB = sd.var("b", b);
                SDVariable t = sd.mmul(sdA, sdB, transposeA, transposeB, transposeResult);
                t.norm1("out");

                String err = OpValidation.validate(new TestCase(sd)
                        .gradientCheck(true));
                assertNull(err, err);
            }
        }
    }
}
Example 7
Source File: LocallyConnected1D.java From deeplearning4j with Apache License 2.0
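A locally connected 1D layer: the (optionally padded) input is sliced once per output position, the slices are concatenated along dimension 0, and a single mmul() against the per-position weights produces the pre-activation output, followed by an optional bias add and the activation.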
@Override
public SDVariable defineLayer(SameDiff sameDiff, SDVariable layerInput,
                              Map<String, SDVariable> paramTable, SDVariable mask) {
    SDVariable w = paramTable.get(ConvolutionParamInitializer.WEIGHT_KEY); // (outH, featureDim, nOut)

    int outH = outputSize;
    int sH = stride;
    int kH = kernel;

    if(padding > 0 || (cm == ConvolutionMode.Same && paddingR > 0)){
        //Note: for same mode, bottom/right padding can be 1 more than top/left padding
        //NCW format.
        if(cm == ConvolutionMode.Same) {
            layerInput = sameDiff.nn().pad(layerInput,
                    sameDiff.constant(Nd4j.createFromArray(new int[][]{{0, 0}, {0, 0}, {padding, paddingR}})),
                    PadMode.CONSTANT, 0);
        } else {
            layerInput = sameDiff.nn().pad(layerInput,
                    sameDiff.constant(Nd4j.createFromArray(new int[][]{{0, 0}, {0, 0}, {padding, padding}})),
                    PadMode.CONSTANT, 0);
        }
    }

    SDVariable[] inputArray = new SDVariable[outH];
    for (int i = 0; i < outH; i++) {
        SDVariable slice = layerInput.get(SDIndex.all(), // miniBatch
                SDIndex.all(), // nIn
                SDIndex.interval(i * sH, i * sH + kH) // kernel
        );
        inputArray[i] = sameDiff.reshape(slice, 1, -1, featureDim);
    }
    SDVariable concatOutput = sameDiff.concat(0, inputArray); // (outH, miniBatch, featureDim)

    SDVariable mmulResult = sameDiff.mmul(concatOutput, w); // (outH, miniBatch, nOut)

    SDVariable result = sameDiff.permute(mmulResult, 1, 2, 0); // (miniBatch, nOut, outH)

    if (hasBias) {
        SDVariable b = paramTable.get(ConvolutionParamInitializer.BIAS_KEY);
        SDVariable biasAddedResult = sameDiff.nn().biasAdd(result, b, true);
        return activation.asSameDiff("out", sameDiff, biasAddedResult);
    } else {
        return activation.asSameDiff("out", sameDiff, result);
    }
}
Example 8
Source File: LocallyConnected2D.java From deeplearning4j with Apache License 2.0
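The 2D counterpart of Example 7: the input (converted to NCHW if necessary and optionally padded) is sliced for every output row/column position, the slices are concatenated, and one mmul() against the weights produces the output, which is then reshaped and permuted back to the requested layout.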
@Override
public SDVariable defineLayer(SameDiff sameDiff, SDVariable layerInput,
                              Map<String, SDVariable> paramTable, SDVariable mask) {
    SDVariable w = paramTable.get(ConvolutionParamInitializer.WEIGHT_KEY);

    long[] inputShape = layerInput.getShape();
    long miniBatch = inputShape[0];
    int outH = outputSize[0];
    int outW = outputSize[1];
    int sH = stride[0];
    int sW = stride[1];
    int kH = kernel[0];
    int kW = kernel[1];

    boolean nchw = format == CNN2DFormat.NCHW;
    if(!nchw)
        layerInput = layerInput.permute(0,3,1,2); //NHWC to NCHW

    if(padding[0] > 0 || padding[1] > 0 || (cm == ConvolutionMode.Same && (paddingBr[0] > 0 || paddingBr[1] > 0))){
        //Note: for same mode, bottom/right padding can be 1 more than top/left padding
        //NCHW format
        if(cm == ConvolutionMode.Same){
            layerInput = sameDiff.nn().pad(layerInput,
                    sameDiff.constant(Nd4j.createFromArray(new int[][]{{0,0},{0,0},{padding[0], paddingBr[0]}, {padding[1], paddingBr[1]}})),
                    PadMode.CONSTANT, 0.0);
        } else {
            layerInput = sameDiff.nn().pad(layerInput,
                    sameDiff.constant(Nd4j.createFromArray(new int[][]{{0,0},{0,0},{padding[0], padding[0]}, {padding[1], padding[1]}})),
                    PadMode.CONSTANT, 0.0);
        }
    }

    SDVariable[] inputArray = new SDVariable[outH * outW];
    for (int i = 0; i < outH; i++) {
        for (int j = 0; j < outW; j++) {
            SDVariable slice = layerInput.get(SDIndex.all(), // miniBatch
                    SDIndex.all(), // nIn
                    SDIndex.interval(i * sH, i * sH + kH), // kernel height
                    SDIndex.interval(j * sW, j * sW + kW) // kernel width
            );
            inputArray[i * outH + j] = sameDiff.reshape(slice, 1, miniBatch, featureDim);
        }
    }
    SDVariable concatOutput = sameDiff.concat(0, inputArray); // (outH * outW, miniBatch, featureDim)

    SDVariable mmulResult = sameDiff.mmul(concatOutput, w); // (outH * outW, miniBatch, nOut)

    SDVariable reshapeResult = sameDiff.reshape(mmulResult, outH, outW, miniBatch, nOut);

    SDVariable permutedResult = nchw ? reshapeResult.permute(2, 3, 0, 1) : reshapeResult.permute(2, 0, 1, 3); // (mb, nOut, outH, outW) or (mb, outH, outW, nOut)

    if (hasBias) {
        SDVariable b = paramTable.get(ConvolutionParamInitializer.BIAS_KEY);
        SDVariable biasAddedResult = sameDiff.nn().biasAdd(permutedResult, b, nchw);
        return activation.asSameDiff("out", sameDiff, biasAddedResult);
    } else {
        return activation.asSameDiff("out", sameDiff, permutedResult);
    }
}