Java Code Examples for org.nd4j.autodiff.samediff.SDVariable#getShape()
The following examples show how to use
org.nd4j.autodiff.samediff.SDVariable#getShape().
The original project and source file are noted above each example.
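As a quick orientation before the examples: getShape() returns the variable's shape as a long[] when it is known, and may return null or a shape containing -1 for dimensions that are not yet resolved. The sketch below is illustrative only; the sd.var and sd.placeHolder signatures follow recent SameDiff releases and differ slightly in the older nd4j sources quoted in several examples.

import org.nd4j.autodiff.samediff.SDVariable;
import org.nd4j.autodiff.samediff.SameDiff;
import org.nd4j.linalg.api.buffer.DataType;

public class GetShapeBasics {
    public static void main(String[] args) {
        SameDiff sd = SameDiff.create();

        // Fully defined shape: getShape() returns {2, 3}
        SDVariable x = sd.var("x", DataType.FLOAT, 2, 3);
        System.out.println(java.util.Arrays.toString(x.getShape()));

        // Placeholder with a dynamic batch dimension: the reported
        // shape keeps the -1 until a concrete array is associated
        SDVariable in = sd.placeHolder("in", DataType.FLOAT, -1, 3);
        System.out.println(java.util.Arrays.toString(in.getShape()));
    }
}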
Example 1
Source File: BaseTransformOp.java From nd4j with Apache License 2.0
public BaseTransformOp(SameDiff sameDiff, SDVariable i_v, long[] shape, boolean inPlace, Object[] extraArgs) {
    super(sameDiff, inPlace, extraArgs);

    if (i_v != null) {
        f().validateDifferentialFunctionsameDiff(i_v);
        this.xVertexId = i_v.getVarName();
        sameDiff.addArgsFor(new SDVariable[]{i_v}, this);
        if (i_v.getShape() != null) {
            this.n = ArrayUtil.prod(i_v.getShape());
        }

        if (Shape.isPlaceholderShape(i_v.getShape())) {
            sameDiff.addPropertyToResolve(this, i_v.getVarName());
        }
    } else {
        throw new IllegalArgumentException("Input must not be a null variable.");
    }
}
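The constructor above routes any input whose shape is still a placeholder to addPropertyToResolve. A minimal sketch of the predicate it relies on, assuming (as this nd4j version's usage above suggests) that a null shape or a -1 dimension marks a placeholder:

import org.nd4j.linalg.api.shape.Shape;

public class PlaceholderShapeCheck {
    public static void main(String[] args) {
        long[] known = {4, 5};     // fully defined
        long[] dynamic = {-1, 5};  // batch dimension unknown

        System.out.println(Shape.isPlaceholderShape(known));   // expected: false
        System.out.println(Shape.isPlaceholderShape(dynamic)); // expected: true
    }
}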
Example 2
Source File: DynamicCustomOp.java From nd4j with Apache License 2.0
private INDArray attemptToGetOrCreateArrForVar(SDVariable var, long[] currShape) {
    INDArray arr = null;
    if (Shape.isPlaceholderShape(var.getShape())) {
        if (var.getShape() == null) {
            val shape = calculateOutputShape();
            if (!shape.isEmpty()) {
                if (currShape != null && !Shape.isPlaceholderShape(currShape)) {
                    sameDiff.putShapeForVarName(var.getVarName(), currShape);
                    arr = var.storeAndAllocateNewArray();
                }
            } else
                arr = null;
        }
    } else if (sameDiff.getArrForVarName(var.getVarName()) == null) {
        if (var.getShape() != null)
            arr = var.storeAndAllocateNewArray();
    } else {
        arr = var.getArr();
    }

    if (arr != null) {
        sameDiff.associateArrayWithVariable(arr, var);
        addOutputArgument(arr);
    }

    return arr;
}
Example 3
Source File: BaseTransformOp.java From nd4j with Apache License 2.0
public BaseTransformOp(SameDiff sameDiff, SDVariable i_v1, SDVariable i_v2, boolean inPlace) {
    super(sameDiff, inPlace, new Object[]{i_v2});

    if (i_v1 != null && i_v2 != null) {
        f().validateDifferentialFunctionsameDiff(i_v1);
        f().validateDifferentialFunctionsameDiff(i_v2);
        this.sameDiff = sameDiff;
        this.inPlace = inPlace;
        this.xVertexId = i_v1.getVarName();
        this.yVertexId = i_v2.getVarName();
        sameDiff.addArgsFor(new SDVariable[]{i_v1, i_v2}, this);
        if (Shape.isPlaceholderShape(i_v1.getShape())) {
            sameDiff.addPropertyToResolve(this, i_v1.getVarName());
        }

        if (Shape.isPlaceholderShape(i_v2.getShape())) {
            sameDiff.addPropertyToResolve(this, i_v2.getVarName());
        }

        if (i_v1.getShape() != null)
            this.n = ArrayUtil.prod(i_v1.getShape());
    } else {
        throw new IllegalArgumentException("Inputs must not be null variables.");
    }
}
Example 4
Source File: BaseTransformOp.java From nd4j with Apache License 2.0
public BaseTransformOp(SameDiff sameDiff, SDVariable i_v1, SDVariable i_v2, Object[] extraArgs) {
    super(sameDiff, extraArgs);

    if (i_v1 != null && i_v2 != null) {
        f().validateDifferentialFunctionsameDiff(i_v1);
        f().validateDifferentialFunctionsameDiff(i_v2);
        this.sameDiff = sameDiff;
        this.xVertexId = i_v1.getVarName();
        this.yVertexId = i_v2.getVarName();
        sameDiff.addArgsFor(new SDVariable[]{i_v1, i_v2}, this);
        if (i_v1.getShape() != null)
            this.n = ArrayUtil.prod(i_v1.getShape());

        if (Shape.isPlaceholderShape(i_v1.getShape())) {
            sameDiff.addPropertyToResolve(this, i_v1.getVarName());
        }

        if (Shape.isPlaceholderShape(i_v2.getShape())) {
            sameDiff.addPropertyToResolve(this, i_v2.getVarName());
        }
    } else {
        throw new IllegalArgumentException("Inputs must not be null variables.");
    }
}
Example 5
Source File: Unstack.java From deeplearning4j with Apache License 2.0
public Unstack(SameDiff sameDiff, SDVariable value, int axis) {
    super(null, sameDiff, new SDVariable[]{value}, false);
    this.jaxis = axis;
    if (value.getShape() != null) {
        if (value.getShape()[axis] != -1) {
            num = (int) value.getShape()[axis];
        }
    }
    if (num <= 0) {
        throw new ND4JIllegalStateException("Unstack: Unable to infer number of outputs from input. Provide number of outputs explicitly.");
    }
    addArgs();
}
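Unstack only reads value.getShape() to infer the output count along the given axis; when the shape is unknown, the exception above fires and the count must be given explicitly. A minimal usage sketch, assuming the sd.unstack(value, axis, num) overload of recent SameDiff releases:

import org.nd4j.autodiff.samediff.SDVariable;
import org.nd4j.autodiff.samediff.SameDiff;
import org.nd4j.linalg.api.buffer.DataType;

public class UnstackByShape {
    public static void main(String[] args) {
        SameDiff sd = SameDiff.create();

        // Known shape {3, 4}: axis 0 has size 3, so Unstack can infer num = 3
        SDVariable x = sd.var("x", DataType.FLOAT, 3, 4);

        // Passing num explicitly always works, known shape or not
        SDVariable[] rows = sd.unstack(x, 0, 3);
        System.out.println(rows.length); // 3
    }
}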
Example 6
Source File: BatchMmul.java From deeplearning4j with Apache License 2.0
public BatchMmul(SameDiff sameDiff, SDVariable[] matrices, boolean transposeA, boolean transposeB) {
    super(null, sameDiff, ArrayUtils.addAll(
            new SDVariable[]{
                    sameDiff.var(Nd4j.ones(matrices[0].dataType(), matrices.length / 2)),  // alphas
                    sameDiff.var(Nd4j.zeros(matrices[1].dataType(), matrices.length / 2))  // betas
            },
            matrices));

    Preconditions.checkState(matrices.length % 2 == 0, "The number of provided matrices needs " +
            "to be divisible by two.");
    this.batchSize = matrices.length / 2;

    SDVariable firstMatrix = matrices[0];
    long[] firstShape = firstMatrix.getShape();
    for (int i = 0; i < batchSize; i++) {
        Preconditions.checkState(Arrays.equals(firstShape, matrices[i].getShape()));
    }

    SDVariable lastMatrix = matrices[2 * batchSize - 1];
    long[] lastShape = lastMatrix.getShape();
    for (int i = batchSize; i < 2 * batchSize; i++) {
        Preconditions.checkState(Arrays.equals(lastShape, matrices[i].getShape()));
    }

    this.transposeA = transposeA ? 1 : 0;
    this.transposeB = transposeB ? 1 : 0;

    this.M = transposeA ? (int) firstShape[1] : (int) firstShape[0];
    this.N = transposeA ? (int) firstShape[0] : (int) firstShape[1];
    this.K = transposeB ? (int) lastShape[0] : (int) lastShape[1];

    addArgs();
}
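BatchMmul reads getShape() from the first and last inputs to derive its dimensions, expecting the first half of the array to hold the A operands and the second half the B operands. A sketch of that layout; all names and shapes here are made up for illustration:

import org.nd4j.autodiff.samediff.SDVariable;
import org.nd4j.autodiff.samediff.SameDiff;
import org.nd4j.linalg.api.buffer.DataType;

public class BatchMmulLayout {
    public static void main(String[] args) {
        SameDiff sd = SameDiff.create();

        // First half: the A operands; second half: the B operands
        SDVariable a1 = sd.var("a1", DataType.FLOAT, 2, 3);
        SDVariable a2 = sd.var("a2", DataType.FLOAT, 2, 3);
        SDVariable b1 = sd.var("b1", DataType.FLOAT, 3, 4);
        SDVariable b2 = sd.var("b2", DataType.FLOAT, 3, 4);
        SDVariable[] matrices = {a1, a2, b1, b2};

        // With transposeA = transposeB = false the constructor above derives:
        // batchSize = 4 / 2 = 2, M = firstShape[0] = 2,
        // N = firstShape[1] = 3, K = lastShape[1] = 4
        System.out.println(matrices[0].getShape()[0]); // M = 2
    }
}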
Example 7
Source File: RecurrentAttentionLayer.java From deeplearning4j with Apache License 2.0
@Override
public SDVariable defineLayer(SameDiff sameDiff, SDVariable layerInput, Map<String, SDVariable> paramTable, SDVariable mask) {
    final val W = paramTable.get(WEIGHT_KEY);
    final val R = paramTable.get(RECURRENT_WEIGHT_KEY);
    final val b = paramTable.get(BIAS_KEY);

    long[] shape = layerInput.getShape();
    Preconditions.checkState(shape != null, "Null shape for input placeholder");
    SDVariable[] inputSlices = sameDiff.unstack(layerInput, 2, (int) shape[2]);
    this.timeSteps = inputSlices.length;
    SDVariable[] outputSlices = new SDVariable[timeSteps];
    SDVariable prev = null;
    for (int i = 0; i < timeSteps; i++) {
        final val x_i = inputSlices[i];
        outputSlices[i] = x_i.mmul(W);
        if (hasBias) {
            outputSlices[i] = outputSlices[i].add(b);
        }

        if (prev != null) {
            SDVariable attn;
            if (projectInput) {
                val Wq = paramTable.get(WEIGHT_KEY_QUERY_PROJECTION);
                val Wk = paramTable.get(WEIGHT_KEY_KEY_PROJECTION);
                val Wv = paramTable.get(WEIGHT_KEY_VALUE_PROJECTION);
                val Wo = paramTable.get(WEIGHT_KEY_OUT_PROJECTION);

                attn = sameDiff.nn.multiHeadDotProductAttention(getLayerName() + "_attention_" + i, prev, layerInput, layerInput, Wq, Wk, Wv, Wo, mask, true);
            } else {
                attn = sameDiff.nn.dotProductAttention(getLayerName() + "_attention_" + i, prev, layerInput, layerInput, mask, true);
            }

            attn = sameDiff.squeeze(attn, 2);
            outputSlices[i] = outputSlices[i].add(attn.mmul(R));
        }

        outputSlices[i] = activation.asSameDiff(sameDiff, outputSlices[i]);
        outputSlices[i] = sameDiff.expandDims(outputSlices[i], 2);
        prev = outputSlices[i];
    }
    return sameDiff.concat(2, outputSlices);
}
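Note the Preconditions check: this layer unstacks the input along axis 2, so layerInput.getShape() must be non-null with a concrete time dimension. A hypothetical input definition satisfying that, assuming the usual [miniBatch, nIn, timeSteps] recurrent layout implied by the unstack axis above:

import org.nd4j.autodiff.samediff.SDVariable;
import org.nd4j.autodiff.samediff.SameDiff;
import org.nd4j.linalg.api.buffer.DataType;

public class FixedTimeStepInput {
    public static void main(String[] args) {
        SameDiff sd = SameDiff.create();

        // The batch size may stay dynamic (-1), but shape[2] must be
        // concrete for unstack along axis 2 to know its output count
        SDVariable layerInput = sd.placeHolder("in", DataType.FLOAT, -1, 16, 10);

        long[] shape = layerInput.getShape(); // {-1, 16, 10}
        System.out.println(shape[2]);         // 10 time steps
    }
}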
Example 8
Source File: DifferentialFunctionFactory.java From nd4j with Apache License 2.0
public Constant val(SDVariable iX) {
    return new Constant(sameDiff(), iX, iX.getShape());
}
Example 9
Source File: BaseBroadcastBoolOp.java From deeplearning4j with Apache License 2.0
public BaseBroadcastBoolOp(SameDiff sameDiff, SDVariable i_v, int[] dimension, Object[] extraArgs) {
    this(sameDiff, i_v, i_v.getShape(), false, dimension, extraArgs);
}
Example 10
Source File: BaseBroadcastBoolOp.java From deeplearning4j with Apache License 2.0
public BaseBroadcastBoolOp(SameDiff sameDiff, SDVariable i_v, int[] dimension, boolean inPlace) {
    this(sameDiff, i_v, i_v.getShape(), inPlace, dimension, null);
}
Example 11
Source File: BaseTransformOp.java From deeplearning4j with Apache License 2.0
public BaseTransformOp(SameDiff sameDiff, SDVariable i_v, Object[] extraArgs) {
    this(sameDiff, i_v, i_v.getShape(), false, extraArgs);
}
Example 12
Source File: BaseTransformOp.java From deeplearning4j with Apache License 2.0
public BaseTransformOp(SameDiff sameDiff, SDVariable i_v, boolean inPlace) {
    this(sameDiff, i_v, i_v.getShape(), inPlace, null);
}
Example 13
Source File: BaseBroadcastOp.java From deeplearning4j with Apache License 2.0
public BaseBroadcastOp(SameDiff sameDiff, SDVariable i_v, int[] dimension, Object[] extraArgs) {
    this(sameDiff, i_v, i_v.getShape(), false, dimension, extraArgs);
}
Example 14
Source File: LocallyConnected2D.java From deeplearning4j with Apache License 2.0
@Override
public SDVariable defineLayer(SameDiff sameDiff, SDVariable layerInput, Map<String, SDVariable> paramTable, SDVariable mask) {
    SDVariable w = paramTable.get(ConvolutionParamInitializer.WEIGHT_KEY);

    long[] inputShape = layerInput.getShape();
    long miniBatch = inputShape[0];
    int outH = outputSize[0];
    int outW = outputSize[1];
    int sH = stride[0];
    int sW = stride[1];
    int kH = kernel[0];
    int kW = kernel[1];

    boolean nchw = format == CNN2DFormat.NCHW;
    if (!nchw)
        layerInput = layerInput.permute(0, 3, 1, 2); // NHWC to NCHW

    if (padding[0] > 0 || padding[1] > 0 || (cm == ConvolutionMode.Same && (paddingBr[0] > 0 || paddingBr[1] > 0))) {
        // Note: for same mode, bottom/right padding can be 1 more than top/left padding
        // NCHW format
        if (cm == ConvolutionMode.Same) {
            layerInput = sameDiff.nn().pad(layerInput,
                    sameDiff.constant(Nd4j.createFromArray(new int[][]{{0, 0}, {0, 0}, {padding[0], paddingBr[0]}, {padding[1], paddingBr[1]}})), PadMode.CONSTANT, 0.0);
        } else {
            layerInput = sameDiff.nn().pad(layerInput,
                    sameDiff.constant(Nd4j.createFromArray(new int[][]{{0, 0}, {0, 0}, {padding[0], padding[0]}, {padding[1], padding[1]}})), PadMode.CONSTANT, 0.0);
        }
    }

    SDVariable[] inputArray = new SDVariable[outH * outW];
    for (int i = 0; i < outH; i++) {
        for (int j = 0; j < outW; j++) {
            SDVariable slice = layerInput.get(
                    SDIndex.all(), // miniBatch
                    SDIndex.all(), // nIn
                    SDIndex.interval(i * sH, i * sH + kH), // kernel height
                    SDIndex.interval(j * sW, j * sW + kW)  // kernel width
            );
            inputArray[i * outH + j] = sameDiff.reshape(slice, 1, miniBatch, featureDim);
        }
    }

    SDVariable concatOutput = sameDiff.concat(0, inputArray); // (outH * outW, miniBatch, featureDim)
    SDVariable mmulResult = sameDiff.mmul(concatOutput, w);   // (outH * outW, miniBatch, nOut)
    SDVariable reshapeResult = sameDiff.reshape(mmulResult, outH, outW, miniBatch, nOut);
    SDVariable permutedResult = nchw ? reshapeResult.permute(2, 3, 0, 1) : reshapeResult.permute(2, 0, 1, 3); // (mb, nOut, outH, outW) or (mb, outH, outW, nOut)

    if (hasBias) {
        SDVariable b = paramTable.get(ConvolutionParamInitializer.BIAS_KEY);
        SDVariable biasAddedResult = sameDiff.nn().biasAdd(permutedResult, b, nchw);
        return activation.asSameDiff("out", sameDiff, biasAddedResult);
    } else {
        return activation.asSameDiff("out", sameDiff, permutedResult);
    }
}
Example 15
Source File: TensorMmul.java From nd4j with Apache License 2.0
private SDVariable doTensorMmul(SDVariable a, SDVariable b, int[][] axes) {
    int validationLength = Math.min(axes[0].length, axes[1].length);
    for (int i = 0; i < validationLength; i++) {
        if (a.getShape()[axes[0][i]] != b.getShape()[axes[1][i]])
            throw new IllegalArgumentException("Size of the given axes at each dimension must be the same size.");
        if (axes[0][i] < 0)
            axes[0][i] += a.getShape().length;
        if (axes[1][i] < 0)
            axes[1][i] += b.getShape().length;
    }

    List<Integer> listA = new ArrayList<>();
    for (int i = 0; i < a.getShape().length; i++) {
        if (!Ints.contains(axes[0], i))
            listA.add(i);
    }

    int[] newAxesA = Ints.concat(Ints.toArray(listA), axes[0]);

    List<Integer> listB = new ArrayList<>();
    for (int i = 0; i < b.getShape().length; i++) {
        if (!Ints.contains(axes[1], i))
            listB.add(i);
    }

    int[] newAxesB = Ints.concat(axes[1], Ints.toArray(listB));

    int n2 = 1;
    int aLength = Math.min(a.getShape().length, axes[0].length);
    for (int i = 0; i < aLength; i++) {
        n2 *= a.getShape()[axes[0][i]];
    }

    // if listA and listB are empty these do not initialize,
    // so initializing with {1} which will then get overridden if not empty
    long[] newShapeA = {-1, n2};
    long[] oldShapeA;
    if (listA.size() == 0) {
        oldShapeA = new long[]{1};
    } else {
        oldShapeA = Longs.toArray(listA);
        for (int i = 0; i < oldShapeA.length; i++)
            oldShapeA[i] = a.getShape()[(int) oldShapeA[i]];
    }

    int n3 = 1;
    int bNax = Math.min(b.getShape().length, axes[1].length);
    for (int i = 0; i < bNax; i++) {
        n3 *= b.getShape()[axes[1][i]];
    }

    int[] newShapeB = {n3, -1};
    long[] oldShapeB;
    if (listB.size() == 0) {
        oldShapeB = new long[]{1};
    } else {
        oldShapeB = Longs.toArray(listB);
        for (int i = 0; i < oldShapeB.length; i++)
            oldShapeB[i] = b.getShape()[(int) oldShapeB[i]];
    }

    SDVariable at = f().reshape(f().permute(a, newAxesA), newShapeA);
    SDVariable bt = f().reshape(f().permute(b, newAxesB), newShapeB);
    SDVariable ret = f().mmul(at, bt);
    long[] aPlusB = Longs.concat(oldShapeA, oldShapeB);
    return f().reshape(ret, aPlusB);
}
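doTensorMmul is the classic tensordot reduction: the contracted axes are validated against getShape(), both operands are permuted and reshaped to 2-D, multiplied with a single mmul, and the result reshaped back to the surviving dimensions. The same contraction is available eagerly via Nd4j.tensorMmul, which makes a handy numeric check of the shape logic:

import org.nd4j.linalg.api.ndarray.INDArray;
import org.nd4j.linalg.factory.Nd4j;

public class TensorMmulCheck {
    public static void main(String[] args) {
        INDArray a = Nd4j.rand(new int[]{2, 3, 4});
        INDArray b = Nd4j.rand(new int[]{4, 3, 5});

        // Contract a's axes {1, 2} with b's axes {1, 0}; the surviving
        // dimensions are a's axis 0 and b's axis 2, giving shape [2, 5]
        INDArray c = Nd4j.tensorMmul(a, b, new int[][]{{1, 2}, {1, 0}});
        System.out.println(java.util.Arrays.toString(c.shape())); // [2, 5]
    }
}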
Example 16
Source File: ShapeOp.java From nd4j with Apache License 2.0
public ShapeOp(SameDiff sameDiff, SDVariable i_v, boolean inPlace) {
    this(sameDiff, i_v, i_v.getShape(), inPlace, null);
}
Example 17
Source File: BaseTransformOp.java From nd4j with Apache License 2.0
public BaseTransformOp(SameDiff sameDiff, SDVariable i_v, Object[] extraArgs) {
    this(sameDiff, i_v, i_v.getShape(), false, extraArgs);
}
Example 18
Source File: BaseTransformOp.java From nd4j with Apache License 2.0
public BaseTransformOp(SameDiff sameDiff, SDVariable i_v, boolean inPlace) {
    this(sameDiff, i_v, i_v.getShape(), inPlace, null);
}
Example 19
Source File: BaseBroadcastOp.java From nd4j with Apache License 2.0
public BaseBroadcastOp(SameDiff sameDiff, SDVariable i_v, int[] dimension, Object[] extraArgs) {
    this(sameDiff, i_v, i_v.getShape(), false, dimension, extraArgs);
}
Example 20
Source File: BaseBroadcastOp.java From nd4j with Apache License 2.0
public BaseBroadcastOp(SameDiff sameDiff, SDVariable i_v, int[] dimension, boolean inPlace) {
    this(sameDiff, i_v, i_v.getShape(), inPlace, dimension, null);
}