Java Code Examples for org.deeplearning4j.nn.workspace.LayerWorkspaceMgr#create()
The following examples show how to use org.deeplearning4j.nn.workspace.LayerWorkspaceMgr#create().
Each example is taken from the deeplearning4j project; its source file and license are noted above the code.
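Most of the examples follow one pattern: compute the output shape, ask the workspace manager for a zero-initialized array of the appropriate ArrayType (so it is allocated in the memory workspace configured for that type, or detached if none is configured), write into it with put()/assign(), and return it. Below is a minimal, self-contained sketch of that pattern. The helper name padRight and its shapes are hypothetical, invented for illustration; only the LayerWorkspaceMgr/ArrayType calls are taken from the examples. LayerWorkspaceMgr.noWorkspaces() returns a manager that allocates everything detached, which is convenient for standalone experiments.

import org.deeplearning4j.nn.workspace.ArrayType;
import org.deeplearning4j.nn.workspace.LayerWorkspaceMgr;
import org.nd4j.linalg.api.buffer.DataType;
import org.nd4j.linalg.api.ndarray.INDArray;
import org.nd4j.linalg.factory.Nd4j;
import org.nd4j.linalg.indexing.INDArrayIndex;
import org.nd4j.linalg.indexing.NDArrayIndex;

public class CreateExample {

    //Hypothetical helper: right-pad a [rows, cols] array with zero columns,
    //allocating the output via the workspace manager rather than via Nd4j directly
    static INDArray padRight(LayerWorkspaceMgr workspaceMgr, INDArray x, long extraCols) {
        long[] outShape = {x.size(0), x.size(1) + extraCols};
        //create() zero-initializes, so only the copied region needs to be written:
        INDArray out = workspaceMgr.create(ArrayType.ACTIVATIONS, x.dataType(), outShape);
        out.put(new INDArrayIndex[] {NDArrayIndex.all(), NDArrayIndex.interval(0, x.size(1))}, x);
        return out;
    }

    public static void main(String[] args) {
        LayerWorkspaceMgr mgr = LayerWorkspaceMgr.noWorkspaces(); //all allocations detached
        INDArray x = Nd4j.rand(DataType.FLOAT, 2, 3);
        System.out.println(padRight(mgr, x, 2)); //shape [2, 5], last two columns zero
    }
}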
Example 1
Source File: SubsetVertex.java From deeplearning4j with Apache License 2.0
@Override
public Pair<Gradient, INDArray[]> doBackward(boolean tbptt, LayerWorkspaceMgr workspaceMgr) {
    if (!canDoBackward())
        throw new IllegalStateException("Cannot do backward pass: error not set");

    INDArray out = workspaceMgr.create(ArrayType.ACTIVATION_GRAD, epsilon.dataType(), forwardShape);
    switch (forwardShape.length) {
        case 2:
            out.put(new INDArrayIndex[] {NDArrayIndex.all(), NDArrayIndex.interval(from, to, true)}, epsilon);
            break;
        case 3:
            out.put(new INDArrayIndex[] {NDArrayIndex.all(), NDArrayIndex.interval(from, to, true),
                            NDArrayIndex.all()}, epsilon);
            break;
        case 4:
            out.put(new INDArrayIndex[] {NDArrayIndex.all(), NDArrayIndex.interval(from, to, true),
                            NDArrayIndex.all(), NDArrayIndex.all()}, epsilon);
            break;
        default:
            throw new RuntimeException("Invalid activation rank"); //Should never happen
    }
    return new Pair<>(null, new INDArray[] {out});
}
Example 2
Source File: UnstackVertex.java From deeplearning4j with Apache License 2.0
@Override
public Pair<Gradient, INDArray[]> doBackward(boolean tbptt, LayerWorkspaceMgr workspaceMgr) {
    if (!canDoBackward())
        throw new IllegalStateException("Cannot do backward pass: error not set");

    INDArray out = workspaceMgr.create(ArrayType.ACTIVATION_GRAD, inputs[0].dataType(), forwardShape);
    long start = from * step;
    long end = (from + 1) * step;

    switch (forwardShape.length) {
        case 2:
            out.put(new INDArrayIndex[] {NDArrayIndex.interval(start, end), NDArrayIndex.all()}, epsilon);
            break;
        case 3:
            out.put(new INDArrayIndex[] {NDArrayIndex.interval(start, end), NDArrayIndex.all(),
                            NDArrayIndex.all()}, epsilon);
            break;
        case 4:
            out.put(new INDArrayIndex[] {NDArrayIndex.interval(start, end), NDArrayIndex.all(),
                            NDArrayIndex.all(), NDArrayIndex.all()}, epsilon);
            break;
        default:
            throw new RuntimeException("Invalid activation rank"); //Should never happen
    }
    return new Pair<>(null, new INDArray[] {out});
}
Example 3
Source File: LastTimeStepVertex.java From deeplearning4j with Apache License 2.0
@Override
public Pair<Gradient, INDArray[]> doBackward(boolean tbptt, LayerWorkspaceMgr workspaceMgr) {
    //Allocate the appropriate sized array:
    INDArray epsilonsOut = workspaceMgr.create(ArrayType.ACTIVATION_GRAD, epsilon.dataType(), fwdPassShape, 'f');

    if (fwdPassTimeSteps == null) {
        //Last time step for all examples
        epsilonsOut.put(new INDArrayIndex[] {NDArrayIndex.all(), NDArrayIndex.all(),
                        NDArrayIndex.point(fwdPassShape[2] - 1)}, epsilon);
    } else {
        //Different time steps were extracted for each example
        for (int i = 0; i < fwdPassTimeSteps.length; i++) {
            epsilonsOut.put(new INDArrayIndex[] {NDArrayIndex.point(i), NDArrayIndex.all(),
                            NDArrayIndex.point(fwdPassTimeSteps[i])}, epsilon.getRow(i));
        }
    }

    return new Pair<>(null, new INDArray[] {epsilonsOut});
}
Example 4
Source File: ZeroPadding3DLayer.java From deeplearning4j with Apache License 2.0
@Override
public INDArray activate(boolean training, LayerWorkspaceMgr workspaceMgr) {
    assertInputSet(false);
    val inShape = input.shape();
    val outD = inShape[2] + padding[0] + padding[1];
    val outH = inShape[3] + padding[2] + padding[3];
    val outW = inShape[4] + padding[4] + padding[5];
    val outShape = new long[] {inShape[0], inShape[1], outD, outH, outW};

    INDArray out = workspaceMgr.create(ArrayType.ACTIVATIONS, input.dataType(), outShape, 'c');

    out.put(new INDArrayIndex[] {NDArrayIndex.all(), NDArrayIndex.all(),
                    NDArrayIndex.interval(padding[0], padding[0] + inShape[2]),
                    NDArrayIndex.interval(padding[2], padding[2] + inShape[3]),
                    NDArrayIndex.interval(padding[4], padding[4] + inShape[4])}, input);

    return out;
}
Example 5
Source File: SpaceToDepth.java From deeplearning4j with Apache License 2.0
@Override
public Pair<Gradient, INDArray> backpropGradient(INDArray epsilon, LayerWorkspaceMgr workspaceMgr) {
    assertInputSet(true);

    INDArray input = this.input.castTo(epsilon.dataType());

    boolean nchw = layerConf().getDataFormat() == CNN2DFormat.NCHW;
    long miniBatch = input.size(0);
    long inDepth = input.size(nchw ? 1 : 3);
    long inH = input.size(nchw ? 2 : 1);
    long inW = input.size(nchw ? 3 : 2);

    long[] epsShape = nchw ? new long[]{miniBatch, inDepth, inH, inW} : new long[]{miniBatch, inH, inW, inDepth};
    INDArray outEpsilon = workspaceMgr.create(ArrayType.ACTIVATION_GRAD, input.dataType(), epsShape, 'c');

    Gradient gradient = new DefaultGradient();

    int blockSize = getBlockSize();

    //Workaround for issue: https://github.com/eclipse/deeplearning4j/issues/8859
    if(!Shape.hasDefaultStridesForShape(epsilon))
        epsilon = epsilon.dup('c');

    CustomOp op = DynamicCustomOp.builder("depth_to_space")
            .addInputs(epsilon)
            .addIntegerArguments(blockSize, nchw ? 0 : 1) //nchw = 0, nhwc = 1
            .addOutputs(outEpsilon)
            .build();
    Nd4j.getExecutioner().exec(op);

    return new Pair<>(gradient, outEpsilon);
}
Example 6
Source File: PoolHelperVertex.java From deeplearning4j with Apache License 2.0
@Override
public Pair<Gradient, INDArray[]> doBackward(boolean tbptt, LayerWorkspaceMgr workspaceMgr) {
    if (!canDoBackward())
        throw new IllegalStateException("Cannot do backward pass: errors not set");

    INDArray out = workspaceMgr.create(ArrayType.ACTIVATION_GRAD, epsilon.dataType(),
                    epsilon.size(0), epsilon.size(1), 1 + epsilon.size(2), 1 + epsilon.size(2));
    out.get(NDArrayIndex.all(), NDArrayIndex.all(), NDArrayIndex.interval(1, inputs[0].size(2)),
                    NDArrayIndex.interval(1, inputs[0].size(3)))
            .assign(epsilon);

    return new Pair<>(null, new INDArray[] {out});
}
Example 7
Source File: TimeSeriesUtils.java From deeplearning4j with Apache License 2.0
/**
 * Reverse an input time series along the time dimension
 *
 * @param in Input activations to reverse, with shape [minibatch, size, timeSeriesLength]
 * @return Reversed activations
 */
public static INDArray reverseTimeSeries(INDArray in, LayerWorkspaceMgr workspaceMgr, ArrayType arrayType){
    if(in == null){
        return null;
    }

    if(in.ordering() != 'f' || in.isView() || !Shape.strideDescendingCAscendingF(in)){
        in = workspaceMgr.dup(arrayType, in, 'f');
    }

    if (in.size(2) > Integer.MAX_VALUE)
        throw new ND4JArraySizeException();
    int[] idxs = new int[(int) in.size(2)];
    int j = 0;
    for( int i = idxs.length - 1; i >= 0; i--){
        idxs[j++] = i;
    }

    INDArray inReshape = in.reshape('f', in.size(0) * in.size(1), in.size(2));

    INDArray outReshape = workspaceMgr.create(arrayType, in.dataType(), new long[]{inReshape.size(0), idxs.length}, 'f');
    Nd4j.pullRows(inReshape, outReshape, 0, idxs);

    return workspaceMgr.leverageTo(arrayType, outReshape.reshape('f', in.size(0), in.size(1), in.size(2)));

    /*
    INDArray out = Nd4j.createUninitialized(in.shape(), 'f');
    CustomOp op = DynamicCustomOp.builder("reverse")
            .addIntegerArguments(new int[]{0,1})
            .addInputs(in)
            .addOutputs(out)
            .callInplace(false)
            .build();
    Nd4j.getExecutioner().exec(op);
    return out;
    */
}
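For context, a hedged usage sketch of the utility above (org.deeplearning4j.util.TimeSeriesUtils). The shapes follow its Javadoc ([minibatch, size, timeSeriesLength]), and LayerWorkspaceMgr.noWorkspaces() is used so no workspace configuration is needed; imports mirror the sketch near the top of this page.

//Hypothetical usage: reverse a random [minibatch=2, size=3, timeSeriesLength=5] series
LayerWorkspaceMgr mgr = LayerWorkspaceMgr.noWorkspaces();
INDArray act = Nd4j.rand(DataType.FLOAT, 2, 3, 5);
INDArray rev = TimeSeriesUtils.reverseTimeSeries(act, mgr, ArrayType.ACTIVATIONS);
//Time axis reversed: rev at time step 0 equals act at time step 4, i.e.
//rev.getDouble(0, 0, 0) == act.getDouble(0, 0, 4)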
Example 8
Source File: DuplicateToTimeSeriesVertex.java From deeplearning4j with Apache License 2.0
@Override
public INDArray doForward(boolean training, LayerWorkspaceMgr workspaceMgr) {
    //First: work out the time series length
    val tsLength = graph.getInput(inputVertexIndex).size(2);
    val outShape = new long[] {inputs[0].size(0), inputs[0].size(1), tsLength};

    INDArray out = workspaceMgr.create(ArrayType.ACTIVATIONS, inputs[0].dataType(), outShape, 'f');
    for (int i = 0; i < tsLength; i++) {
        out.put(new INDArrayIndex[] {NDArrayIndex.all(), NDArrayIndex.all(), NDArrayIndex.point(i)}, inputs[0]);
    }
    return out;
}
Example 9
Source File: Cropping2DLayer.java From deeplearning4j with Apache License 2.0
@Override
public Pair<Gradient, INDArray> backpropGradient(INDArray epsilon, LayerWorkspaceMgr workspaceMgr) {
    val inShape = input.shape();
    INDArray epsNext = workspaceMgr.create(ArrayType.ACTIVATION_GRAD, input.dataType(), inShape, 'c');
    INDArray epsNextSubset = inputSubset(epsNext);
    epsNextSubset.assign(epsilon);
    return new Pair<>((Gradient) new DefaultGradient(), epsNext);
}
Example 10
Source File: Cropping3DLayer.java From deeplearning4j with Apache License 2.0
@Override
public Pair<Gradient, INDArray> backpropGradient(INDArray epsilon, LayerWorkspaceMgr workspaceMgr) {
    val inShape = input.shape();
    INDArray epsNext = workspaceMgr.create(ArrayType.ACTIVATION_GRAD, input.dataType(), inShape, 'c');
    INDArray epsNextSubset = inputSubset(epsNext);
    epsNextSubset.assign(epsilon);
    return new Pair<>((Gradient) new DefaultGradient(), epsNext);
}
Example 11
Source File: ZeroPadding1DLayer.java From deeplearning4j with Apache License 2.0
@Override
public INDArray activate(boolean training, LayerWorkspaceMgr workspaceMgr) {
    assertInputSet(false);
    val inShape = input.shape();
    val paddedOut = inShape[2] + padding[0] + padding[1];
    val outShape = new long[] {inShape[0], inShape[1], paddedOut};

    INDArray out = workspaceMgr.create(ArrayType.ACTIVATIONS, dataType, outShape, 'c');
    out.put(new INDArrayIndex[] {NDArrayIndex.all(), NDArrayIndex.all(),
                    NDArrayIndex.interval(padding[0], padding[0] + inShape[2])}, input);

    return out;
}
Example 12
Source File: ZeroPaddingLayer.java From deeplearning4j with Apache License 2.0
@Override
public INDArray activate(boolean training, LayerWorkspaceMgr workspaceMgr) {
    assertInputSet(false);

    boolean nchw = layerConf().getDataFormat() == CNN2DFormat.NCHW;
    int hIdx = nchw ? 2 : 1;
    int wIdx = nchw ? 3 : 2;

    int[] padding = layerConf().getPadding();
    val inShape = input.shape();
    val outH = inShape[hIdx] + padding[0] + padding[1];
    val outW = inShape[wIdx] + padding[2] + padding[3];
    val outShape = nchw ? new long[] {inShape[0], inShape[1], outH, outW}
                    : new long[] {inShape[0], outH, outW, inShape[3]};

    INDArray out = workspaceMgr.create(ArrayType.ACTIVATIONS, input.dataType(), outShape, 'c');

    if(nchw) {
        out.put(new INDArrayIndex[]{NDArrayIndex.all(), NDArrayIndex.all(),
                NDArrayIndex.interval(padding[0], padding[0] + inShape[hIdx]),
                NDArrayIndex.interval(padding[2], padding[2] + inShape[wIdx])}, input);
    } else {
        out.put(new INDArrayIndex[]{NDArrayIndex.all(),
                NDArrayIndex.interval(padding[0], padding[0] + inShape[hIdx]),
                NDArrayIndex.interval(padding[2], padding[2] + inShape[wIdx]),
                NDArrayIndex.all()}, input);
    }

    return out;
}
Example 13
Source File: Cropping1DLayer.java From deeplearning4j with Apache License 2.0
@Override
public Pair<Gradient, INDArray> backpropGradient(INDArray epsilon, LayerWorkspaceMgr workspaceMgr) {
    val inShape = input.shape();
    INDArray epsNext = workspaceMgr.create(ArrayType.ACTIVATION_GRAD, dataType, inShape, 'c');
    INDArray epsNextSubset = epsNext.get(all(), all(), interval(cropping[0], epsNext.size(2) - cropping[1]));
    epsNextSubset.assign(epsilon);
    return new Pair<>((Gradient) new DefaultGradient(), epsNext);
}
Example 14
Source File: Deconvolution3DLayer.java From deeplearning4j with Apache License 2.0
protected INDArray preOutput(boolean training, LayerWorkspaceMgr workspaceMgr) {
    INDArray bias = getParamWithNoise(DeconvolutionParamInitializer.BIAS_KEY, training, workspaceMgr);
    INDArray weights = getParamWithNoise(DeconvolutionParamInitializer.WEIGHT_KEY, training, workspaceMgr);

    //Input validation: expect rank 5 matrix
    if (input.rank() != 5) {
        throw new DL4JInvalidInputException("Got rank " + input.rank()
                + " array as input to Deconvolution3DLayer with shape " + Arrays.toString(input.shape())
                + ". Expected rank 5 array with shape [minibatchSize, channels, inputHeight, inputWidth, inputDepth] or"
                + " [minibatchSize, inputHeight, inputWidth, inputDepth, channels]. " + layerId());
    }

    Convolution3D.DataFormat df = layerConf().getDataFormat();
    boolean ncdhw = layerConf().getDataFormat() == Convolution3D.DataFormat.NCDHW;
    int chDim = ncdhw ? 1 : 4;
    if (input.size(chDim) != layerConf().getNIn()) {
        String layerName = conf.getLayer().getLayerName();
        if (layerName == null)
            layerName = "(not named)";
        throw new DL4JInvalidInputException("Cannot do forward pass in Deconvolution3D layer (layer name = "
                + layerName + ", layer index = " + index
                + "): input array channels does not match CNN layer configuration"
                + " (data input channels = " + input.size(chDim) + ", "
                + (ncdhw ? "[minibatch,channels,height,width,depth]=" : "[minibatch,height,width,depth,channels]=")
                + Arrays.toString(input.shape()) + "; expected" + " input channels = " + layerConf().getNIn() + ") "
                + layerId());
    }

    int[] dilation = layerConf().getDilation();
    int[] kernel = layerConf().getKernelSize();
    int[] strides = layerConf().getStride();
    int[] pad;
    ConvolutionMode cm = layerConf().getConvolutionMode();
    long[] outSize;
    int[] inSize = df == Convolution3D.DataFormat.NCDHW
            ? new int[]{(int) input.size(2), (int) input.size(3), (int) input.size(4)}
            : new int[]{(int) input.size(1), (int) input.size(2), (int) input.size(3)};
    if (cm == ConvolutionMode.Same) {
        outSize = ConvolutionUtils.getDeconvolution3DOutputSize(input, kernel, strides, null, dilation, cm,
                layerConf().getDataFormat()); //Also performs validation
        pad = ConvolutionUtils.getSameModeTopLeftPadding(ArrayUtil.toInts(outSize), inSize, kernel, strides, dilation);
    } else {
        pad = layerConf().getPadding();
        outSize = ConvolutionUtils.getDeconvolution3DOutputSize(input, kernel, strides, pad, dilation, cm,
                layerConf().getDataFormat()); //Also performs validation
    }

    long outH = outSize[0];
    long outW = outSize[1];
    long outD = outSize[2];

    val miniBatch = input.size(0);
    long[] outShape = df == Convolution3D.DataFormat.NCDHW
            ? new long[]{miniBatch, layerConf().getNOut(), outH, outW, outD}
            : new long[]{miniBatch, outH, outW, outD, layerConf().getNOut()};
    INDArray output = workspaceMgr.create(ArrayType.ACTIVATIONS, input.dataType(), outShape, 'c');

    int sameMode = (cm == ConvolutionMode.Same) ? 1 : 0;

    int[] args = new int[] {
            kernel[0], kernel[1], kernel[2], strides[0], strides[1], strides[2],
            pad[0], pad[1], pad[2], dilation[0], dilation[1], dilation[2], sameMode,
            df == Convolution3D.DataFormat.NCDHW ? 0 : 1
    };

    INDArray[] opInputs;
    if (layerConf().hasBias()) {
        opInputs = new INDArray[]{input, weights, bias};
    } else {
        opInputs = new INDArray[]{input, weights};
    }
    CustomOp op = DynamicCustomOp.builder("deconv3d")
            .addInputs(opInputs)
            .addIntegerArguments(args)
            .addOutputs(output)
            .callInplace(false)
            .build();
    Nd4j.getExecutioner().exec(op);

    return output;
}
Example 15
Source File: SpaceToBatch.java From deeplearning4j with Apache License 2.0
protected INDArray preOutput(boolean training, boolean forBackprop, LayerWorkspaceMgr workspaceMgr) {
    assertInputSet(false);
    applyDropOutIfNecessary(training, null);

    if (input.rank() != 4) {
        throw new DL4JInvalidInputException("Got rank " + input.rank()
                + " array as input to space to batch with shape " + Arrays.toString(input.shape())
                + ". Expected rank 4 array with shape " + layerConf().getFormat().dimensionNames() + ". "
                + layerId());
    }

    if (preOutput != null && forBackprop) {
        return preOutput;
    }

    boolean nchw = layerConf().getFormat() == CNN2DFormat.NCHW;

    long inMiniBatch = input.size(0);
    long depth = input.size(nchw ? 1 : 3);
    long inH = input.size(nchw ? 2 : 1);
    long inW = input.size(nchw ? 3 : 2);

    int[] blocks = getBlocks();
    int[][] padding = getPadding();

    long paddedH = inH + padding[0][0] + padding[0][1];
    long paddedW = inW + padding[1][0] + padding[1][1];

    long outH = paddedH / blocks[0];
    long outW = paddedW / blocks[1];
    long outMiniBatch = inMiniBatch * blocks[0] * blocks[1];

    long[] outShape = nchw ? new long[]{outMiniBatch, depth, outH, outW} : new long[]{outMiniBatch, outH, outW, depth};

    INDArray out = workspaceMgr.create(ArrayType.ACTIVATIONS, input.dataType(), outShape, 'c');

    INDArray inNHWC = nchw ? input.permute(0, 2, 3, 1) : input;
    INDArray outNHWC = nchw ? out.permute(0, 2, 3, 1) : out;

    CustomOp op = DynamicCustomOp.builder("space_to_batch_nd")
            .addInputs(inNHWC, getBlocksArray(), getPaddingArray())
            .addOutputs(outNHWC)
            .build();
    Nd4j.exec(op);

    return out;
}
Example 16
Source File: Deconvolution3DLayer.java From deeplearning4j with Apache License 2.0
@Override
public Pair<Gradient, INDArray> backpropGradient(INDArray epsilon, LayerWorkspaceMgr workspaceMgr) {
    assertInputSet(true);
    if (input.rank() != 5) {
        throw new DL4JInvalidInputException("Got rank " + input.rank()
                + " array as input to Deconvolution3DLayer with shape " + Arrays.toString(input.shape())
                + ". Expected rank 5 array with shape [minibatchSize, channels, inputHeight, inputWidth, inputDepth] or"
                + " [minibatchSize, inputHeight, inputWidth, inputDepth, channels]. " + layerId());
    }

    INDArray weights = getParamWithNoise(DeconvolutionParamInitializer.WEIGHT_KEY, true, workspaceMgr);

    Convolution3D.DataFormat df = layerConf().getDataFormat();
    ConvolutionMode cm = layerConf().getConvolutionMode();

    int[] dilation = layerConf().getDilation();
    int[] kernel = layerConf().getKernelSize();
    int[] strides = layerConf().getStride();
    int[] pad = layerConf().getPadding();

    INDArray biasGradView = gradientViews.get(DeconvolutionParamInitializer.BIAS_KEY);
    INDArray weightGradView = gradientViews.get(DeconvolutionParamInitializer.WEIGHT_KEY);

    INDArray outEps = workspaceMgr.create(ArrayType.ACTIVATION_GRAD, weights.dataType(), input.shape(), 'c');

    Integer sameMode = (layerConf().getConvolutionMode() == ConvolutionMode.Same) ? 1 : 0;

    int[] args = new int[] {
            kernel[0], kernel[1], kernel[2], strides[0], strides[1], strides[2],
            pad[0], pad[1], pad[2], dilation[0], dilation[1], dilation[2], sameMode,
            df == Convolution3D.DataFormat.NCDHW ? 0 : 1
    };

    INDArray delta;
    IActivation afn = layerConf().getActivationFn();
    INDArray preOutput = preOutput(true, workspaceMgr);
    delta = afn.backprop(preOutput, epsilon).getFirst();

    INDArray[] opInputs;
    INDArray[] opOutputs;
    if(layerConf().hasBias()){
        INDArray bias = getParamWithNoise(DeconvolutionParamInitializer.BIAS_KEY, true, workspaceMgr);
        opInputs = new INDArray[]{input, weights, bias, delta};
        opOutputs = new INDArray[]{outEps, weightGradView, biasGradView};
    } else {
        opInputs = new INDArray[]{input, weights, delta};
        opOutputs = new INDArray[]{outEps, weightGradView};
    }
    CustomOp op = DynamicCustomOp.builder("deconv3d_bp")
            .addInputs(opInputs)
            .addIntegerArguments(args)
            .addOutputs(opOutputs)
            .callInplace(false)
            .build();
    Nd4j.getExecutioner().exec(op);

    Gradient retGradient = new DefaultGradient();
    if(layerConf().hasBias()){
        retGradient.setGradientFor(DeconvolutionParamInitializer.BIAS_KEY, biasGradView);
    }
    retGradient.setGradientFor(DeconvolutionParamInitializer.WEIGHT_KEY, weightGradView, 'c');
    weightNoiseParams.clear();

    return new Pair<>(retGradient, outEps);
}
Example 17
Source File: Deconvolution2DLayer.java From deeplearning4j with Apache License 2.0
@Override
public Pair<Gradient, INDArray> backpropGradient(INDArray epsilon, LayerWorkspaceMgr workspaceMgr) {
    assertInputSet(true);
    if (input.rank() != 4) {
        throw new DL4JInvalidInputException("Got rank " + input.rank()
                + " array as input to Deconvolution2DLayer with shape " + Arrays.toString(input.shape())
                + ". Expected rank 4 array with shape " + layerConf().getCnn2dDataFormat().dimensionNames() + ". "
                + layerId());
    }

    INDArray weights = getParamWithNoise(DeconvolutionParamInitializer.WEIGHT_KEY, true, workspaceMgr);

    CNN2DFormat format = layerConf().getCnn2dDataFormat();
    boolean nchw = format == CNN2DFormat.NCHW;
    int hDim = nchw ? 2 : 1;
    int wDim = nchw ? 3 : 2;

    long miniBatch = input.size(0);
    long inH = input.size(hDim);
    long inW = input.size(wDim);

    long inDepth = weights.size(0);

    long kH = weights.size(2);
    long kW = weights.size(3);

    int[] dilation = layerConf().getDilation();
    int[] kernel = layerConf().getKernelSize();
    int[] strides = layerConf().getStride();
    int[] pad;
    if (convolutionMode == ConvolutionMode.Same) {
        int[] outSize = new int[]{(int) epsilon.size(hDim), (int) epsilon.size(wDim)};
        pad = ConvolutionUtils.getSameModeTopLeftPadding(outSize, new int[] {(int) inH, (int) inW}, kernel,
                strides, dilation);
    } else {
        pad = layerConf().getPadding();
    }

    INDArray biasGradView = gradientViews.get(DeconvolutionParamInitializer.BIAS_KEY);
    INDArray weightGradView = gradientViews.get(DeconvolutionParamInitializer.WEIGHT_KEY);

    long[] epsShape = nchw ? new long[]{miniBatch, inDepth, inH, inW} : new long[]{miniBatch, inH, inW, inDepth};
    INDArray outEps = workspaceMgr.create(ArrayType.ACTIVATION_GRAD, weights.dataType(), epsShape, 'c');

    Integer sameMode = (convolutionMode == ConvolutionMode.Same) ? 1 : 0;

    int[] args = new int[] {
            (int) kH, (int) kW, strides[0], strides[1],
            pad[0], pad[1], dilation[0], dilation[1], sameMode,
            nchw ? 0 : 1 //0 = NCHW; 1 = NHWC
    };

    INDArray delta;
    IActivation afn = layerConf().getActivationFn();
    Pair<INDArray, INDArray> p = preOutput4d(true, true, workspaceMgr);
    delta = afn.backprop(p.getFirst(), epsilon).getFirst();

    //DL4J Deconv weights: [inputDepth, outputDepth, kH, kW]
    //libnd4j weights: [kH, kW, oC, iC]
    weights = weights.permute(2, 3, 1, 0);
    INDArray weightGradViewOp = weightGradView.permute(2, 3, 1, 0);

    INDArray[] opInputs;
    INDArray[] opOutputs;
    if(layerConf().hasBias()){
        INDArray bias = getParamWithNoise(DeconvolutionParamInitializer.BIAS_KEY, true, workspaceMgr);
        opInputs = new INDArray[]{input, weights, bias, delta};
        opOutputs = new INDArray[]{outEps, weightGradViewOp, biasGradView};
    } else {
        opInputs = new INDArray[]{input, weights, delta};
        opOutputs = new INDArray[]{outEps, weightGradViewOp};
    }
    CustomOp op = DynamicCustomOp.builder("deconv2d_bp")
            .addInputs(opInputs)
            .addIntegerArguments(args)
            .addOutputs(opOutputs)
            .callInplace(false)
            .build();
    Nd4j.getExecutioner().exec(op);

    Gradient retGradient = new DefaultGradient();
    if(layerConf().hasBias()){
        retGradient.setGradientFor(DeconvolutionParamInitializer.BIAS_KEY, biasGradView);
    }
    retGradient.setGradientFor(DeconvolutionParamInitializer.WEIGHT_KEY, weightGradView, 'c');
    weightNoiseParams.clear();

    return new Pair<>(retGradient, outEps);
}
Example 18
Source File: LastTimeStepVertex.java From deeplearning4j with Apache License 2.0
@Override
public INDArray doForward(boolean training, LayerWorkspaceMgr workspaceMgr) {
    //First: get the mask arrays for the given input, if any
    INDArray[] inputMaskArrays = graph.getInputMaskArrays();
    INDArray mask = (inputMaskArrays != null ? inputMaskArrays[inputIdx] : null);

    //Then: work out, from the mask array, which time step of activations we want, extract activations
    //Also: record where they came from (so we can do errors later)
    fwdPassShape = inputs[0].shape();

    INDArray out;
    if (mask == null) {
        //No mask array -> extract same (last) column for all
        long lastTS = inputs[0].size(2) - 1;
        out = inputs[0].get(NDArrayIndex.all(), NDArrayIndex.all(), NDArrayIndex.point(lastTS));
        out = workspaceMgr.dup(ArrayType.ACTIVATIONS, out);
        fwdPassTimeSteps = null; //Null -> last time step for all examples
    } else {
        val outShape = new long[] {inputs[0].size(0), inputs[0].size(1)};
        out = workspaceMgr.create(ArrayType.ACTIVATIONS, inputs[0].dataType(), outShape);

        //Want the index of the last non-zero entry in the mask array.
        //Check a little here by using mulRowVector([0,1,2,3,...]) and argmax
        long maxTsLength = fwdPassShape[2];
        INDArray row = Nd4j.linspace(0, maxTsLength - 1, maxTsLength, mask.dataType());
        INDArray temp = mask.mulRowVector(row);
        INDArray lastElementIdx = Nd4j.argMax(temp, 1);

        fwdPassTimeSteps = new int[(int) fwdPassShape[0]];
        for (int i = 0; i < fwdPassTimeSteps.length; i++) {
            fwdPassTimeSteps[i] = (int) lastElementIdx.getDouble(i);
        }

        //Now, get and assign the corresponding subsets of 3d activations:
        for (int i = 0; i < fwdPassTimeSteps.length; i++) {
            out.putRow(i, inputs[0].get(NDArrayIndex.point(i), NDArrayIndex.all(),
                            NDArrayIndex.point(fwdPassTimeSteps[i])));
        }
    }

    return out;
}
Example 19
Source File: StackVertex.java From deeplearning4j with Apache License 2.0
@Override
public INDArray doForward(boolean training, LayerWorkspaceMgr workspaceMgr) {
    // stacking along dimension 0
    // inputs[] is an array of INDArray (e.g.: shape of 3 x [nExamples, nSize])
    // what we want to do is make a stacked output (e.g.: [3 x nExamples, nSize])
    lastInputShapes = null;
    int nStack = inputs.length;
    val inShape = inputs[0].shape();
    val outShape = new long[inShape.length];

    // create the new shape
    outShape[0] = nStack * inShape[0];
    for (int i = 1; i < inShape.length; i++) {
        outShape[i] = inShape[i];
    }

    boolean variableLengthTS = false;
    if (inShape.length == 3) {
        //RNN data - check for variable length time series
        long minLength = inputs[0].size(2);
        long maxLength = minLength;
        for (int i = 1; i < inputs.length; i++) {
            long thisLength = inputs[i].size(2);
            minLength = Math.min(minLength, thisLength);
            maxLength = Math.max(maxLength, thisLength);
        }
        variableLengthTS = (minLength != maxLength);

        if (!variableLengthTS) {
            try (MemoryWorkspace ws = workspaceMgr.notifyScopeBorrowed(ArrayType.ACTIVATIONS)) {
                return Nd4j.concat(0, inputs);
            }
        }

        outShape[2] = maxLength;
        INDArray out = workspaceMgr.create(ArrayType.ACTIVATIONS, inputs[0].dataType(), outShape);
        long numExamples = inputs[0].size(0);
        lastInputShapes = new long[inputs.length][0];
        for (int i = 0; i < inputs.length; i++) {
            out.put(new INDArrayIndex[] {NDArrayIndex.interval(i * numExamples, (i + 1) * numExamples),
                            NDArrayIndex.all(), NDArrayIndex.interval(0, inputs[i].size(2))}, inputs[i]);
            lastInputShapes[i] = inputs[i].shape();
        }

        return out;
    } else {
        try (MemoryWorkspace ws = workspaceMgr.notifyScopeBorrowed(ArrayType.ACTIVATIONS)) {
            return Nd4j.concat(0, inputs);
        }
    }
}
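Note the second allocation path in this vertex: when no time-series padding is needed, it never calls create(). Instead, notifyScopeBorrowed(ArrayType.ACTIVATIONS) opens the activations workspace, so the result of Nd4j.concat(...) is allocated directly inside it. A minimal sketch of that borrowed-scope pattern, mirroring the vertex above (inputs and workspaceMgr are assumed to be in scope):

//Arrays created while the borrowed scope is open are placed in the workspace
//configured for ACTIVATIONS (or detached if none is configured for that type):
try (MemoryWorkspace ws = workspaceMgr.notifyScopeBorrowed(ArrayType.ACTIVATIONS)) {
    return Nd4j.concat(0, inputs); //allocated inside the borrowed workspace
}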
Example 20
Source File: YoloUtils.java From deeplearning4j with Apache License 2.0
public static INDArray activate(@NonNull INDArray boundingBoxPriors, @NonNull INDArray input, boolean nchw,
                LayerWorkspaceMgr layerWorkspaceMgr){
    if(!nchw)
        input = input.permute(0, 3, 1, 2); //NHWC to NCHW

    long mb = input.size(0);
    long h = input.size(2);
    long w = input.size(3);
    long b = boundingBoxPriors.size(0);
    long c = input.size(1) / b - 5;  //input.size(1) == b * (5 + C) -> C = (input.size(1)/b) - 5

    INDArray output = layerWorkspaceMgr.create(ArrayType.ACTIVATIONS, input.dataType(), input.shape(), 'c');
    INDArray output5 = output.reshape('c', mb, b, 5 + c, h, w);
    INDArray output4 = output;  //output.get(all(), interval(0,5*b), all(), all());
    INDArray input4 = input.dup('c');  //input.get(all(), interval(0,5*b), all(), all()).dup('c');
    INDArray input5 = input4.reshape('c', mb, b, 5 + c, h, w);

    //X/Y center in grid: sigmoid
    INDArray predictedXYCenterGrid = input5.get(all(), all(), interval(0, 2), all(), all());
    Transforms.sigmoid(predictedXYCenterGrid, false);

    //width/height: prior * exp(input)
    INDArray predictedWHPreExp = input5.get(all(), all(), interval(2, 4), all(), all());
    INDArray predictedWH = Transforms.exp(predictedWHPreExp, false);
    Broadcast.mul(predictedWH, boundingBoxPriors.castTo(input.dataType()), predictedWH, 1, 2); //Box priors: [b, 2]; predictedWH: [mb, b, 2, h, w]

    //Confidence - sigmoid
    INDArray predictedConf = input5.get(all(), all(), point(4), all(), all()); //Shape: [mb, B, H, W]
    Transforms.sigmoid(predictedConf, false);

    output4.assign(input4);

    //Softmax
    //TODO OPTIMIZE?
    INDArray inputClassesPreSoftmax = input5.get(all(), all(), interval(5, 5 + c), all(), all()); //Shape: [minibatch, C, H, W]
    INDArray classPredictionsPreSoftmax2d = inputClassesPreSoftmax.permute(0, 1, 3, 4, 2) //[minibatch, b, c, h, w] To [mb, b, h, w, c]
            .dup('c').reshape('c', new long[]{mb * b * h * w, c});
    Transforms.softmax(classPredictionsPreSoftmax2d, false);
    INDArray postSoftmax5d = classPredictionsPreSoftmax2d.reshape('c', mb, b, h, w, c).permute(0, 1, 4, 2, 3);

    INDArray outputClasses = output5.get(all(), all(), interval(5, 5 + c), all(), all()); //Shape: [minibatch, C, H, W]
    outputClasses.assign(postSoftmax5d);

    if(!nchw)
        output = output.permute(0, 2, 3, 1); //NCHW to NHWC

    return output;
}