Java Code Examples for org.deeplearning4j.nn.workspace.LayerWorkspaceMgr#notifyScopeBorrowed()
The following examples show how to use
org.deeplearning4j.nn.workspace.LayerWorkspaceMgr#notifyScopeBorrowed() .
You can vote up the examples you like or vote down the ones you don't like,
and you can go to the original project or source file by following the links above each example. You may also check out the related API usage in the sidebar.
Example 1
Source File: RepeatVector.java From deeplearning4j with Apache License 2.0 | 6 votes |
/**
 * Backprop through the repeat operation: the gradient w.r.t. the input is the sum of the
 * incoming epsilon over the repeated (time) axis. RepeatVector has no parameters, so the
 * returned {@link Gradient} is empty.
 *
 * @param epsilon      gradient from the layer above
 * @param workspaceMgr workspace manager for the current pass
 * @return pair of (empty gradient, epsilon w.r.t. this layer's input)
 */
@Override
public Pair<Gradient, INDArray> backpropGradient(INDArray epsilon, LayerWorkspaceMgr workspaceMgr) {
    assertInputSet(true);
    // Ensure the incoming gradient matches this layer's data type before reducing it
    if (epsilon.dataType() != dataType) {
        epsilon = epsilon.castTo(dataType);
    }
    // The repeated axis depends on the data format: NCW repeats along dim 2, otherwise dim 1
    int repeatDim = layerConf().getDataFormat() == RNNFormat.NCW ? 2 : 1;
    INDArray summedEps;
    try (MemoryWorkspace ws = workspaceMgr.notifyScopeBorrowed(ArrayType.ACTIVATION_GRAD)) {
        summedEps = epsilon.sum(repeatDim);
    }
    return new Pair<>(new DefaultGradient(), summedEps);
}
Example 2
Source File: RepeatVector.java From deeplearning4j with Apache License 2.0 | 6 votes |
/**
 * Forward pass: compute the pre-output and, when training with an FF_CACHE workspace
 * available and open, cache a safe duplicate of it for later reuse.
 *
 * @param training     true if called during training
 * @param workspaceMgr workspace manager for the current pass
 * @return the layer activations
 */
@Override
public INDArray activate(boolean training, LayerWorkspaceMgr workspaceMgr) {
    assertInputSet(false);
    if (cacheMode == null) {
        cacheMode = CacheMode.NONE;
    }
    INDArray out = preOutput(training, false, workspaceMgr);
    // Only cache when training, caching is enabled, and the cache workspace actually exists and is open
    boolean doCache = training
            && cacheMode != CacheMode.NONE
            && workspaceMgr.hasConfiguration(ArrayType.FF_CACHE)
            && workspaceMgr.isWorkspaceOpen(ArrayType.FF_CACHE);
    if (doCache) {
        try (MemoryWorkspace wsB = workspaceMgr.notifyScopeBorrowed(ArrayType.FF_CACHE)) {
            preOutput = out.unsafeDuplication();
        }
    }
    return out;
}
Example 3
Source File: Upsampling3D.java From deeplearning4j with Apache License 2.0 | 6 votes |
@Override public INDArray activate(boolean training, LayerWorkspaceMgr workspaceMgr) { assertInputSet(false); applyDropOutIfNecessary(training, workspaceMgr); if (cacheMode == null) cacheMode = CacheMode.NONE; INDArray z = preOutput(training, false, workspaceMgr); // we do cache only if cache workspace exists. Skip otherwise if (training && cacheMode != CacheMode.NONE && workspaceMgr.hasConfiguration(ArrayType.FF_CACHE) && workspaceMgr.isWorkspaceOpen(ArrayType.FF_CACHE)) { try (MemoryWorkspace wsB = workspaceMgr.notifyScopeBorrowed(ArrayType.FF_CACHE)) { preOutput = z.unsafeDuplication(); } } return z; }
Example 4
Source File: Upsampling2D.java From deeplearning4j with Apache License 2.0 | 6 votes |
@Override public INDArray activate(boolean training, LayerWorkspaceMgr workspaceMgr) { assertInputSet(false); applyDropOutIfNecessary(training, workspaceMgr); if (cacheMode == null) cacheMode = CacheMode.NONE; INDArray z = preOutput(training, false, workspaceMgr); // we do cache only if cache workspace exists. Skip otherwise if (training && cacheMode != CacheMode.NONE && workspaceMgr.hasConfiguration(ArrayType.FF_CACHE) && workspaceMgr.isWorkspaceOpen(ArrayType.FF_CACHE)) { try (MemoryWorkspace wsB = workspaceMgr.notifyScopeBorrowed(ArrayType.FF_CACHE)) { preOutput = z.unsafeDuplication(); } } return z; }
Example 5
Source File: L2NormalizeVertex.java From deeplearning4j with Apache License 2.0 | 6 votes |
@Override public INDArray doForward(boolean training, LayerWorkspaceMgr workspaceMgr) { if (!canDoForward()) throw new IllegalStateException("Cannot do forward pass: inputs not set (L2NormalizeVertex " + vertexName + " idx " + vertexIndex + ")"); // L2 norm along all dimensions except 0, unless user-specified // x / |x|2 INDArray x = inputs[0]; int[] dimensions = getDimensions(x); INDArray xNorm2 = x.norm2(dimensions); Transforms.max(xNorm2, eps, false); try(MemoryWorkspace ws = workspaceMgr.notifyScopeBorrowed(ArrayType.ACTIVATIONS)){ if (x.rank() == 2) { return x.divColumnVector(xNorm2); } else { INDArray out = Nd4j.createUninitialized(x.shape(), x.ordering()); return Nd4j.getExecutioner().exec(new BroadcastDivOp(x, xNorm2, out, 0)); } } }
Example 6
Source File: L2Vertex.java From deeplearning4j with Apache License 2.0 | 6 votes |
/**
 * Forward pass: per-example Euclidean (L2) distance between the two inputs, reduced over
 * every dimension except 0, returned as a column vector of shape [nExamples, 1].
 *
 * @param training     true if called during training (unused here)
 * @param workspaceMgr workspace manager for the current pass
 * @return column vector of distances
 */
@Override
public INDArray doForward(boolean training, LayerWorkspaceMgr workspaceMgr) {
    if (!canDoForward())
        throw new IllegalStateException("Cannot do forward pass: input not set");

    INDArray first = inputs[0];
    INDArray second = inputs[1];

    // Reduce over dimensions 1..rank-1 (everything except the example dimension)
    int rank = first.rank();
    int[] reduceDims = new int[rank - 1];
    for (int d = 1; d < rank; d++) {
        reduceDims[d - 1] = d;
    }

    try (MemoryWorkspace ws = workspaceMgr.notifyScopeBorrowed(ArrayType.ACTIVATIONS)) {
        INDArray distances = Nd4j.getExecutioner().exec(new EuclideanDistance(first, second, reduceDims));
        return distances.reshape(distances.size(0), 1);
    }
}
Example 7
Source File: Cropping1DLayer.java From deeplearning4j with Apache License 2.0 | 5 votes |
/**
 * Extract the cropped subset of {@code from} along dimension 2, dropping {@code cropping[0]}
 * leading and {@code cropping[1]} trailing steps, materialized in the given workspace array type
 * with this layer's data type.
 *
 * @param from         input array to crop (rank-3; cropped along dim 2)
 * @param arrayType    workspace array type to borrow while materializing the result
 * @param workspaceMgr workspace manager for the current pass
 * @return a new array containing the cropped data, cast to this layer's dtype if needed
 */
private INDArray inputSubset(INDArray from, ArrayType arrayType, LayerWorkspaceMgr workspaceMgr) {
    try (MemoryWorkspace ws = workspaceMgr.notifyScopeBorrowed(arrayType)) {
        // View of the retained interval on dimension 2
        INDArray cropped = from.get(all(), all(), interval(cropping[0], from.size(2) - cropping[1]));
        // dup() if the dtype already matches; castTo() (which also copies) otherwise
        return from.dataType() == dataType ? cropped.dup(from.ordering()) : cropped.castTo(dataType);
    }
}
Example 8
Source File: ScaleVertex.java From deeplearning4j with Apache License 2.0 | 5 votes |
/**
 * Forward pass: multiply the single input by the configured scale factor.
 *
 * @param training     true if called during training (unused here)
 * @param workspaceMgr workspace manager for the current pass
 * @return the scaled activations
 */
@Override
public INDArray doForward(boolean training, LayerWorkspaceMgr workspaceMgr) {
    if (!canDoForward())
        throw new IllegalStateException("Cannot do forward pass: inputs not set (ScaleVertex " + vertexName
                + " idx " + vertexIndex + ")");

    // This vertex is strictly single-input
    if (inputs.length > 1)
        throw new IllegalArgumentException(
                "ScaleVertex (name " + vertexName + " idx " + vertexIndex + ") only supports 1 input.");

    try (MemoryWorkspace ws = workspaceMgr.notifyScopeBorrowed(ArrayType.ACTIVATIONS)) {
        return inputs[0].mul(scaleFactor);
    }
}
Example 9
Source File: ScaleVertex.java From deeplearning4j with Apache License 2.0 | 5 votes |
/**
 * Backward pass: scaling is linear, so the input gradient is simply epsilon multiplied by the
 * same scale factor. There are no parameters, hence a null {@link Gradient}.
 *
 * @param tbptt        true if truncated BPTT is in use (unused here)
 * @param workspaceMgr workspace manager for the current pass
 * @return pair of (null gradient, single-element epsilon array for the input)
 */
@Override
public Pair<Gradient, INDArray[]> doBackward(boolean tbptt, LayerWorkspaceMgr workspaceMgr) {
    if (!canDoBackward())
        throw new IllegalStateException("Cannot do backward pass: errors not set (ScaleVertex " + vertexName
                + " idx " + vertexIndex + ")");

    try (MemoryWorkspace ws = workspaceMgr.notifyScopeBorrowed(ArrayType.ACTIVATION_GRAD)) {
        INDArray scaledEps = epsilon.mul(scaleFactor);
        return new Pair<>(null, new INDArray[] {scaledEps});
    }
}
Example 10
Source File: ShiftVertex.java From deeplearning4j with Apache License 2.0 | 5 votes |
/**
 * Forward pass: add the configured shift factor to the single input (element-wise).
 *
 * @param training     true if called during training (unused here)
 * @param workspaceMgr workspace manager for the current pass
 * @return the shifted activations
 */
@Override
public INDArray doForward(boolean training, LayerWorkspaceMgr workspaceMgr) {
    if (!canDoForward())
        throw new IllegalStateException("Cannot do forward pass: inputs not set (ShiftVertex " + vertexName
                + " idx " + vertexIndex + ")");

    // This vertex is strictly single-input
    if (inputs.length > 1)
        throw new IllegalArgumentException(
                "ShiftVertex (name " + vertexName + " idx " + vertexIndex + ") only supports 1 input.");

    try (MemoryWorkspace ws = workspaceMgr.notifyScopeBorrowed(ArrayType.ACTIVATIONS)) {
        return inputs[0].add(shiftFactor);
    }
}
Example 11
Source File: L2Vertex.java From deeplearning4j with Apache License 2.0 | 5 votes |
/**
 * Backward pass for the L2 (Euclidean distance) vertex.
 * Recomputes the forward distance s = |a - b|_2, then propagates
 * dL/da = epsilon * (a - b) / s and dL/db = -dL/da.
 * No parameters, hence a null Gradient in the returned pair.
 *
 * NOTE(review): several steps mutate in place (Transforms.max with false, muliColumnVector,
 * BroadcastMulOp writing back into diff), so statement order is significant here.
 *
 * @param tbptt        true if truncated BPTT is in use (forwarded to doForward)
 * @param workspaceMgr workspace manager for the current pass
 * @return pair of (null gradient, {dL/da, dL/db})
 */
@Override
public Pair<Gradient, INDArray[]> doBackward(boolean tbptt, LayerWorkspaceMgr workspaceMgr) {
    if (!canDoBackward())
        throw new IllegalStateException("Cannot do backward pass: error not set");

    INDArray a = inputs[0];
    INDArray b = inputs[1];

    // Recompute the forward distance; clamp below by eps in place
    INDArray out = doForward(tbptt, workspaceMgr);
    Transforms.max(out, eps, false); // in case of 0

    INDArray dLdlambda = epsilon; //dL/dlambda aka 'epsilon' - from layer above
    // Element-wise reciprocal of the distance
    INDArray sNegHalf = out.rdiv(1.0); //s^(-1/2) = 1.0 / s^(1/2) = 1.0 / out

    INDArray diff;
    // (a - b) allocated inside the gradient workspace so it can be returned/reused safely
    try (MemoryWorkspace ws = workspaceMgr.notifyScopeBorrowed(ArrayType.ACTIVATION_GRAD)) {
        diff = a.sub(b);
    }

    INDArray first = dLdlambda.mul(sNegHalf); //Column vector for all cases

    INDArray dLda;
    INDArray dLdb;
    if (a.rank() == 2) {
        //2d case (MLPs etc)
        // In-place multiply: diff becomes dL/da
        dLda = diff.muliColumnVector(first);
        try (MemoryWorkspace ws = workspaceMgr.notifyScopeBorrowed(ArrayType.ACTIVATION_GRAD)) {
            dLdb = dLda.neg();
        }
    } else {
        //RNN and CNN case - Broadcast along dimension 0
        // Broadcast multiply writes back into diff (third arg is the output)
        dLda = Nd4j.getExecutioner().exec(new BroadcastMulOp(diff, first, diff, 0));
        try (MemoryWorkspace ws = workspaceMgr.notifyScopeBorrowed(ArrayType.ACTIVATION_GRAD)) {
            dLdb = dLda.neg();
        }
    }

    return new Pair<>(null, new INDArray[] {dLda, dLdb});
}
Example 12
Source File: LSTMHelpers.java From deeplearning4j with Apache License 2.0 | 4 votes |
/**
 * Enter the FF_CACHE workspace scope when caching is appropriate for this pass; no-op otherwise.
 *
 * @param training     true if called during training
 * @param cacheMode    the configured cache mode
 * @param workspaceMgr workspace manager for the current pass
 */
private static void cacheEnter(boolean training, CacheMode cacheMode, LayerWorkspaceMgr workspaceMgr) {
    if (!shouldCache(training, cacheMode, workspaceMgr)) {
        return;
    }
    // NOTE(review): the borrowed scope is deliberately not closed here — presumably a matching
    // cache-exit call elsewhere leaves the scope; confirm against the caller.
    workspaceMgr.notifyScopeBorrowed(ArrayType.FF_CACHE);
}
Example 13
Source File: StackVertex.java From deeplearning4j with Apache License 2.0 | 4 votes |
/**
 * Forward pass for StackVertex: stacks all inputs along dimension 0.
 * For rank-3 (RNN) inputs with variable time-series lengths, shorter inputs are placed into a
 * zero-padded output of the maximum length, and the original per-input shapes are recorded in
 * {@code lastInputShapes} for use during backprop. Otherwise a plain concat along dim 0 is used.
 *
 * @param training     true if called during training (unused here)
 * @param workspaceMgr workspace manager for the current pass
 * @return the stacked activations
 */
@Override
public INDArray doForward(boolean training, LayerWorkspaceMgr workspaceMgr) {
    // stacking along dimension 0
    // inputs[] is an array of INDArray (e.g.: shape of 3 x [nExamples, nSize])
    // what we want to do is make a stacked output (e.g.: [3 x nExamples, nSize])
    lastInputShapes = null;
    int nStack = inputs.length;
    val inShape = inputs[0].shape();
    val outShape = new long[inShape.length];

    // create the new shape: dim 0 is multiplied by the number of stacked inputs
    outShape[0] = nStack * inShape[0];
    for (int i = 1; i < inShape.length; i++) {
        outShape[i] = inShape[i];
    }

    boolean variableLengthTS = false;
    if (inShape.length == 3) {
        //RNN data - check for variable length time series
        long minLength = inputs[0].size(2);
        long maxLength = minLength;
        for (int i = 1; i < inputs.length; i++) {
            long thisLength = inputs[i].size(2);
            minLength = Math.min(minLength, thisLength);
            maxLength = Math.max(maxLength, thisLength);
        }
        variableLengthTS = (minLength != maxLength);

        if (!variableLengthTS) {
            // All time series are the same length: a simple concat suffices
            try (MemoryWorkspace ws = workspaceMgr.notifyScopeBorrowed(ArrayType.ACTIVATIONS)) {
                return Nd4j.concat(0, inputs);
            }
        }

        // Variable lengths: build a padded output sized to the longest series
        outShape[2] = maxLength;
        INDArray out = workspaceMgr.create(ArrayType.ACTIVATIONS, inputs[0].dataType(), outShape);
        long numExamples = inputs[0].size(0);
        // Record each input's original shape so backprop can un-pad correctly
        lastInputShapes = new long[inputs.length][0];
        for (int i = 0; i < inputs.length; i++) {
            // Copy input i into its example-block of the output, only up to its own length on dim 2
            out.put(new INDArrayIndex[] {NDArrayIndex.interval(i * numExamples, (i + 1) * numExamples),
                    NDArrayIndex.all(), NDArrayIndex.interval(0, inputs[i].size(2))}, inputs[i]);
            lastInputShapes[i] = inputs[i].shape();
        }

        return out;
    } else {
        // Non-RNN data: stack via concat along dimension 0
        try (MemoryWorkspace ws = workspaceMgr.notifyScopeBorrowed(ArrayType.ACTIVATIONS)) {
            return Nd4j.concat(0, inputs);
        }
    }
}
Example 14
Source File: MergeVertex.java From deeplearning4j with Apache License 2.0 | 4 votes |
/**
 * Forward pass for MergeVertex: concatenates all inputs along {@code mergeAxis}.
 * Validates that all inputs share the same rank and the same size on dimension 0, casting each
 * to this vertex's data type first. Records per-input shapes in {@code forwardPassShapes} and
 * the common rank in {@code fwdPassRank} for use during backprop.
 *
 * @param training     true if called during training (unused here)
 * @param workspaceMgr workspace manager for the current pass
 * @return the merged activations
 */
@Override
public INDArray doForward(boolean training, LayerWorkspaceMgr workspaceMgr) {
    if (!canDoForward())
        throw new IllegalStateException("Cannot do forward pass: inputs not set");

    if (inputs.length == 1) {
        //No-op case: a single input passes straight through (leveraged into the activations workspace)
        val shape = inputs[0].shape();
        forwardPassShapes = new long[][] {Arrays.copyOf(shape, shape.length)};
        return workspaceMgr.leverageTo(ArrayType.ACTIVATIONS, inputs[0]);
    }

    // Cast all inputs to this vertex's dtype (no-op for those already correct)
    INDArray[] in = new INDArray[inputs.length];
    for (int i = 0; i < in.length; i++) {
        in[i] = inputs[i].castTo(dataType); //No-op if correct type
    }

    forwardPassShapes = new long[in.length][0];
    val nExamples = in[0].size(0);
    fwdPassRank = in[0].rank();
    for (int i = 0; i < in.length; i++) {
        val currShape = in[i].shape();
        // All inputs must have the same rank...
        if (fwdPassRank != currShape.length) {
            throw new IllegalStateException(
                    "Cannot merge activations with different ranks: first activations have rank "
                            + fwdPassRank + ", activations[" + i + "] have rank " + currShape.length
                            + " (shape=" + Arrays.toString(currShape) + ")");
        }
        forwardPassShapes[i] = Arrays.copyOf(currShape, currShape.length);
        // ...and the same minibatch size (dimension 0)
        if (currShape[0] != nExamples) {
            throw new IllegalStateException(
                    "Cannot merge activations with different number of examples (activations[0] shape: "
                            + Arrays.toString(in[0].shape()) + ", activations[" + i + "] shape: "
                            + Arrays.toString(in[i].shape()));
        }
    }

    try (MemoryWorkspace ws = workspaceMgr.notifyScopeBorrowed(ArrayType.ACTIVATIONS)) {
        INDArray out = Nd4j.concat(mergeAxis, in);
        return out;
    }
}