org.deeplearning4j.nn.workspace.LayerWorkspaceMgr Java Examples
The following examples show how to use org.deeplearning4j.nn.workspace.LayerWorkspaceMgr. The examples are drawn from open-source projects, including deeplearning4j itself, jstarcraft-rns, and AILibs; the source file and license are listed above each example.
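LayerWorkspaceMgr decides, per array type (ACTIVATIONS, ACTIVATION_GRAD, FF_WORKING_MEM, INPUT, and so on), which ND4J memory workspace an array should be created in, duplicated into, or leveraged to. The sketch below shows the most common pattern from the examples that follow: obtaining a workspace-free manager with LayerWorkspaceMgr.noWorkspaces() and passing it to a layer's forward pass. It is a minimal illustration rather than code from any one project; the class name LayerWorkspaceMgrSketch and the helper method are assumptions made for this page.

import org.deeplearning4j.nn.api.Layer;
import org.deeplearning4j.nn.workspace.LayerWorkspaceMgr;
import org.nd4j.linalg.api.ndarray.INDArray;

public class LayerWorkspaceMgrSketch {

    // Forward pass with a manager that keeps every array outside of any workspace,
    // mirroring the LayerWorkspaceMgr.noWorkspaces() usage in the test examples below.
    static INDArray forwardWithoutWorkspaces(Layer layer, INDArray features) {
        LayerWorkspaceMgr mgr = LayerWorkspaceMgr.noWorkspaces();
        // training = false: dropout and other training-only behaviour is skipped
        return layer.activate(features, false, mgr);
    }
}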
Example #1
Source File: Upsampling3D.java From deeplearning4j with Apache License 2.0
@Override
public INDArray activate(boolean training, LayerWorkspaceMgr workspaceMgr) {
    assertInputSet(false);
    applyDropOutIfNecessary(training, workspaceMgr);

    if (cacheMode == null)
        cacheMode = CacheMode.NONE;

    INDArray z = preOutput(training, false, workspaceMgr);

    // we do cache only if cache workspace exists. Skip otherwise
    if (training && cacheMode != CacheMode.NONE && workspaceMgr.hasConfiguration(ArrayType.FF_CACHE)
            && workspaceMgr.isWorkspaceOpen(ArrayType.FF_CACHE)) {
        try (MemoryWorkspace wsB = workspaceMgr.notifyScopeBorrowed(ArrayType.FF_CACHE)) {
            preOutput = z.unsafeDuplication();
        }
    }
    return z;
}
Example #2
Source File: CenterLossOutputLayer.java From deeplearning4j with Apache License 2.0
/**
 * Compute the score for each example individually, after labels and input have been set.
 *
 * @param fullNetRegTerm Regularization term for the entire network (or, 0.0 to not include regularization)
 * @return A column INDArray of shape [numExamples,1], where entry i is the score of the ith example
 */
@Override
public INDArray computeScoreForExamples(double fullNetRegTerm, LayerWorkspaceMgr workspaceMgr) {
    if (input == null || labels == null)
        throw new IllegalStateException("Cannot calculate score without input and labels " + layerId());
    INDArray preOut = preOutput2d(false, workspaceMgr);

    // calculate the intra-class score component
    INDArray centers = params.get(CenterLossParamInitializer.CENTER_KEY);
    INDArray centersForExamples = labels.mmul(centers);
    INDArray intraClassScoreArray = input.sub(centersForExamples);

    // calculate the inter-class score component
    ILossFunction interClassLoss = layerConf().getLossFn();
    INDArray scoreArray = interClassLoss.computeScoreArray(getLabels2d(workspaceMgr, ArrayType.FF_WORKING_MEM),
            preOut, layerConf().getActivationFn(), maskArray);
    scoreArray.addi(intraClassScoreArray.muli(layerConf().getLambda() / 2));

    if (fullNetRegTerm != 0.0) {
        scoreArray.addi(fullNetRegTerm);
    }
    return workspaceMgr.leverageTo(ArrayType.ACTIVATIONS, scoreArray);
}
Example #3
Source File: DropConnect.java From deeplearning4j with Apache License 2.0
@Override
public INDArray getParameter(Layer layer, String paramKey, int iteration, int epoch, boolean train,
                             LayerWorkspaceMgr workspaceMgr) {
    ParamInitializer init = layer.conf().getLayer().initializer();
    INDArray param = layer.getParam(paramKey);

    double p;
    if (weightRetainProbSchedule == null) {
        p = weightRetainProb;
    } else {
        p = weightRetainProbSchedule.valueAt(iteration, epoch);
    }

    if (train && init.isWeightParam(layer.conf().getLayer(), paramKey)
            || (applyToBiases && init.isBiasParam(layer.conf().getLayer(), paramKey))) {
        INDArray out = workspaceMgr.createUninitialized(ArrayType.INPUT, param.dataType(), param.shape(), param.ordering());
        Nd4j.getExecutioner().exec(new DropOut(param, out, p));
        return out;
    }
    return param;
}
Example #4
Source File: DeepFMOutputLayer.java From jstarcraft-rns with Apache License 2.0
@Override
public Pair<Gradient, INDArray> backpropGradient(INDArray previous, LayerWorkspaceMgr workspaceMgr) {
    assertInputSet(true);
    // Returns Gradient and delta^(this), not Gradient and epsilon^(this-1)
    Pair<Gradient, INDArray> pair = getGradientsAndDelta(preOutput2d(true, workspaceMgr), workspaceMgr);
    INDArray delta = pair.getSecond();

    INDArray w = getParamWithNoise(DefaultParamInitializer.WEIGHT_KEY, true, workspaceMgr);
    INDArray epsilonNext = workspaceMgr.createUninitialized(ArrayType.ACTIVATION_GRAD,
            new long[] { w.size(0), delta.size(0) }, 'f');
    epsilonNext = w.mmuli(delta.transpose(), epsilonNext).transpose();

    // Normally we would clear weightNoiseParams here - but we want to reuse them
    // for forward + backward + score
    // So this is instead done in MultiLayerNetwork/CompGraph backprop methods
    epsilonNext = backpropDropOutIfPresent(epsilonNext);
    return new Pair<>(pair.getFirst(), epsilonNext);
}
Example #5
Source File: TestGraphNodes.java From deeplearning4j with Apache License 2.0
@Test
public void testReshapeNode() {
    Nd4j.getRandom().setSeed(12345);

    GraphVertex reshapeVertex = new ReshapeVertex(null, "", -1, 'c', new int[] {-1, 736}, null, Nd4j.dataType());

    val inputShape = new long[] {1, 1, 1, 736};
    INDArray input = Nd4j.create(inputShape);

    reshapeVertex.setInputs(input);
    INDArray out = reshapeVertex.doForward(false, LayerWorkspaceMgr.noWorkspaces());
    assertArrayEquals(new long[] {1, 736}, out.shape());

    reshapeVertex.setEpsilon(out);
    INDArray[] backward = reshapeVertex.doBackward(false, LayerWorkspaceMgr.noWorkspaces()).getSecond();
    assertTrue(Arrays.equals(backward[0].shape(), inputShape));
}
Example #6
Source File: PLNetDyadRanker.java From AILibs with GNU Affero General Public License v3.0
private INDArray computeScaledGradient(final INDArray dyadMatrix) {
    int dyadRankingLength = dyadMatrix.rows();
    List<INDArray> activations = this.plNet.feedForward(dyadMatrix);
    INDArray output = activations.get(activations.size() - 1);
    output = output.transpose();
    INDArray deltaW = Nd4j.zeros(this.plNet.params().length());
    Gradient deltaWk = null;
    MultiLayerNetwork plNetClone = this.plNet.clone();

    for (int k = 0; k < dyadRankingLength; k++) {
        // compute derivative of loss w.r.t. k
        plNetClone.setInput(dyadMatrix.getRow(k));
        plNetClone.feedForward(true, false);
        INDArray lossGradient = PLNetLoss.computeLossGradient(output, k);
        // compute backprop gradient for weight updates w.r.t. k
        Pair<Gradient, INDArray> p = plNetClone.backpropGradient(lossGradient, null);
        deltaWk = p.getFirst();
        this.plNet.getUpdater().update(this.plNet, deltaWk, this.iteration, this.epoch, 1, LayerWorkspaceMgr.noWorkspaces());
        deltaW.addi(deltaWk.gradient());
    }
    return deltaW;
}
Example #7
Source File: GaussianDropout.java From deeplearning4j with Apache License 2.0
@Override
public INDArray applyDropout(INDArray inputActivations, INDArray output, int iteration, int epoch, LayerWorkspaceMgr workspaceMgr) {
    double r;
    if (rateSchedule != null) {
        r = rateSchedule.valueAt(iteration, epoch);
    } else {
        r = rate;
    }

    double stdev = Math.sqrt(r / (1.0 - r));

    noise = workspaceMgr.createUninitialized(ArrayType.INPUT, output.dataType(), inputActivations.shape(), inputActivations.ordering());
    Nd4j.getExecutioner().exec(new GaussianDistribution(noise, 1.0, stdev));

    return Nd4j.getExecutioner().exec(new MulOp(inputActivations, noise, output))[0];
}
Example #8
Source File: L2Vertex.java From deeplearning4j with Apache License 2.0
@Override
public INDArray doForward(boolean training, LayerWorkspaceMgr workspaceMgr) {
    if (!canDoForward())
        throw new IllegalStateException("Cannot do forward pass: input not set");

    INDArray a = inputs[0];
    INDArray b = inputs[1];

    int[] dimensions = new int[a.rank() - 1];
    for (int i = 1; i < a.rank(); i++) {
        dimensions[i - 1] = i;
    }

    try (MemoryWorkspace ws = workspaceMgr.notifyScopeBorrowed(ArrayType.ACTIVATIONS)) {
        INDArray arr = Nd4j.getExecutioner().exec(new EuclideanDistance(a, b, dimensions));
        return arr.reshape(arr.size(0), 1);
    }
}
Example #9
Source File: ReshapePreprocessor.java From deeplearning4j with Apache License 2.0
@Override
public INDArray preProcess(INDArray input, int miniBatchSize, LayerWorkspaceMgr workspaceMgr) {
    // the target shape read from a keras config does not have mini-batch size included. We prepend it here dynamically.
    long[] targetShape = getShape(this.targetShape, miniBatchSize);
    long[] inputShape = getShape(this.inputShape, miniBatchSize);

    if (prodLong(input.shape()) == prodLong(targetShape)) {
        if (input.ordering() != 'c' || !Shape.hasDefaultStridesForShape(input)) {
            input = workspaceMgr.dup(ArrayType.ACTIVATIONS, input, 'c');
        }
        return workspaceMgr.leverageTo(ArrayType.ACTIVATIONS, input.reshape(targetShape));
    } else {
        throw new IllegalStateException("Input shape " + Arrays.toString(input.shape())
                + " and output shape " + Arrays.toString(inputShape) + " do not match");
    }
}
Example #10
Source File: RnnToFeedForwardPreProcessor.java From deeplearning4j with Apache License 2.0
@Override
public INDArray preProcess(INDArray input, int miniBatchSize, LayerWorkspaceMgr workspaceMgr) {
    // Need to reshape RNN activations from 3d to 2d (for input into feed forward layer)
    if (input.rank() != 3)
        throw new IllegalArgumentException(
                "Invalid input: expect NDArray with rank 3 (i.e., activations for RNN layer)");

    if (input.ordering() != 'f' || !Shape.hasDefaultStridesForShape(input))
        input = workspaceMgr.dup(ArrayType.ACTIVATIONS, input, 'f');
    if (rnnDataFormat == RNNFormat.NWC) {
        input = input.permute(0, 2, 1);
    }

    val shape = input.shape();
    INDArray ret;
    if (shape[0] == 1) {
        ret = input.tensorAlongDimension(0, 1, 2).permute(1, 0); //Edge case: miniBatchSize==1
    } else if (shape[2] == 1) {
        ret = input.tensorAlongDimension(0, 1, 0); //Edge case: timeSeriesLength=1
    } else {
        INDArray permuted = input.permute(0, 2, 1); //Permute, so we get correct order after reshaping
        ret = permuted.reshape('f', shape[0] * shape[2], shape[1]);
    }
    return workspaceMgr.leverageTo(ArrayType.ACTIVATIONS, ret);
}
Example #11
Source File: PReLU.java From deeplearning4j with Apache License 2.0
@Override
public INDArray activate(boolean training, LayerWorkspaceMgr mgr) {
    assertInputSet(false);
    applyDropOutIfNecessary(training, mgr);

    INDArray in;
    if (training) {
        in = mgr.dup(ArrayType.ACTIVATIONS, input, input.ordering());
    } else {
        in = mgr.leverageTo(ArrayType.ACTIVATIONS, input);
    }

    INDArray alpha = getParam(PReLUParamInitializer.WEIGHT_KEY);
    return new ActivationPReLU(alpha, axes).getActivation(in, training);
}
Example #12
Source File: SpatialDropout.java From deeplearning4j with Apache License 2.0
@Override
public INDArray applyDropout(INDArray inputActivations, INDArray output, int iteration, int epoch, LayerWorkspaceMgr workspaceMgr) {
    Preconditions.checkArgument(inputActivations.rank() == 5 || inputActivations.rank() == 4 || inputActivations.rank() == 3,
            "Cannot apply spatial dropout to activations of rank %s: " +
                    "spatial dropout can only be used for rank 3, 4 or 5 activations (input activations shape: %s)",
            inputActivations.rank(), inputActivations.shape());

    double currP;
    if (pSchedule != null) {
        currP = pSchedule.valueAt(iteration, epoch);
    } else {
        currP = p;
    }

    val minibatch = inputActivations.size(0);
    val dim1 = inputActivations.size(1);
    mask = workspaceMgr.createUninitialized(ArrayType.INPUT, output.dataType(), minibatch, dim1).assign(1.0);
    Nd4j.getExecutioner().exec(new DropOutInverted(mask, currP));

    Broadcast.mul(inputActivations, mask, output, 0, 1);
    return output;
}
Example #13
Source File: CnnLossLayer.java From deeplearning4j with Apache License 2.0
@Override
public INDArray activate(boolean training, LayerWorkspaceMgr workspaceMgr) {
    assertInputSet(false);
    if (input.rank() != 4)
        throw new UnsupportedOperationException("Input must be rank 4 with shape " + layerConf().getFormat().dimensionNames()
                + ". Got input with rank " + input.rank() + " " + layerId());

    CNN2DFormat format = layerConf().getFormat();
    INDArray in = workspaceMgr.dup(ArrayType.ACTIVATIONS, input, input.ordering());
    INDArray input2d = ConvolutionUtils.reshape4dTo2d(in, format, workspaceMgr, ArrayType.ACTIVATIONS);
    INDArray out2d = layerConf().getActivationFn().getActivation(input2d, training);

    return ConvolutionUtils.reshape2dTo4d(out2d, input.shape(), format, workspaceMgr, ArrayType.ACTIVATIONS);
}
Example #14
Source File: TestDropout.java From deeplearning4j with Apache License 2.0
@Test
public void testGaussianNoiseValues() {
    Nd4j.getRandom().setSeed(12345);

    GaussianNoise d = new GaussianNoise(0.1); //sqrt(0.1/(1-0.1)) = 0.3333 stdev

    INDArray in = Nd4j.ones(50, 50);
    INDArray out = d.applyDropout(in, Nd4j.create(in.shape()), 0, 0, LayerWorkspaceMgr.noWorkspacesImmutable());
    assertEquals(in, Nd4j.ones(50, 50));

    double mean = out.meanNumber().doubleValue();
    double stdev = out.stdNumber().doubleValue();

    assertEquals(1.0, mean, 0.05);
    assertEquals(0.1, stdev, 0.01);
}
Example #15
Source File: ConvolutionUtils.java From deeplearning4j with Apache License 2.0
public static INDArray reshape2dTo4d(INDArray in2d, long[] toShape, CNN2DFormat format, LayerWorkspaceMgr workspaceMgr, ArrayType type) {
    if (in2d.rank() != 2)
        throw new IllegalArgumentException("Invalid input: expect NDArray with rank 2");
    if (toShape.length != 4)
        throw new IllegalArgumentException("Invalid input: expect toShape with 4 elements: got " + Arrays.toString(toShape));

    if (in2d.ordering() != 'c' || !Shape.hasDefaultStridesForShape(in2d))
        in2d = workspaceMgr.dup(type, in2d, 'c');

    if (format == CNN2DFormat.NCHW) {
        //Reshape: from [n*h*w,c] to [n,h,w,c] to [n,c,h,w]
        INDArray out = in2d.reshape('c', toShape[0], toShape[2], toShape[3], toShape[1]);
        return workspaceMgr.leverageTo(type, out.permute(0, 3, 1, 2));
    } else {
        //Reshape: from [n*h*w,c] to [n,h,w,c]
        return workspaceMgr.leverageTo(type, in2d.reshape('c', toShape));
    }
}
Example #16
Source File: MultiLayerNetwork.java From deeplearning4j with Apache License 2.0
/**
 * Calculate activations for a few layers at once. Suitable for partial activation of an autoencoder.
 *
 * For example: in a 10-layer deep autoencoder, layers 0-4 inclusive are used for the encoding part,
 * and layers 5-9 inclusive are used for the decoding part.
 *
 * @param from first layer to be activated, inclusive
 * @param to last layer to be activated, inclusive
 * @return the activation from the last layer
 */
public INDArray activateSelectedLayers(int from, int to, INDArray input) {
    if (input == null)
        throw new IllegalStateException("Unable to perform activation; no input found");
    if (from < 0 || from >= layers.length || from >= to)
        throw new IllegalStateException("Unable to perform activation; FROM is out of layer space");
    if (to < 1 || to >= layers.length)
        throw new IllegalStateException("Unable to perform activation; TO is out of layer space");

    try {
        LayerWorkspaceMgr mgr = LayerWorkspaceMgr.noWorkspaces(helperWorkspaces); //TODO

        INDArray res = input;
        for (int l = from; l <= to; l++) {
            res = this.activationFromPrevLayer(l, res, false, mgr);
        }
        return res;
    } catch (OutOfMemoryError e) {
        CrashReportingUtil.writeMemoryCrashDump(this, e);
        throw e;
    }
}
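For reference, a hedged usage sketch of activateSelectedLayers: it assumes an already-initialized 10-layer autoencoder MultiLayerNetwork (named net here purely for illustration) and runs only the encoder half, layers 0-4 inclusive, as described in the Javadoc above.

// Hypothetical helper, not part of the example above: run only the encoder half
// (layers 0 to 4 inclusive) of a 10-layer autoencoder.
static INDArray encode(MultiLayerNetwork net, INDArray features) {
    return net.activateSelectedLayers(0, 4, features);
}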
Example #17
Source File: CustomLayerImpl.java From deeplearning4j with Apache License 2.0
@Override
public INDArray activate(boolean training, LayerWorkspaceMgr workspaceMgr) {
    /*
    The activate method is used for doing the forward pass. Note that it relies on the pre-output method;
    essentially we are just applying the activation function (or, functions in this example).
    In this particular (contrived) example, we have TWO activation functions - one for the first half of the
    outputs and another for the second half.
     */

    INDArray output = preOutput(training, workspaceMgr);

    int columns = output.columns();

    INDArray firstHalf = output.get(NDArrayIndex.all(), NDArrayIndex.interval(0, columns / 2));
    INDArray secondHalf = output.get(NDArrayIndex.all(), NDArrayIndex.interval(columns / 2, columns));

    IActivation activation1 = layerConf().getActivationFn();
    IActivation activation2 = ((CustomLayer) conf.getLayer()).getSecondActivationFunction();

    //IActivation function instances modify the activation functions in-place
    activation1.getActivation(firstHalf, training);
    activation2.getActivation(secondHalf, training);

    return output;
}
Example #18
Source File: PermutePreprocessor.java From deeplearning4j with Apache License 2.0
@Override
public INDArray backprop(INDArray output, int miniBatchSize, LayerWorkspaceMgr workspaceMgr) {
    if (output.ordering() != 'c' || !Shape.hasDefaultStridesForShape(output)) {
        output = workspaceMgr.dup(ArrayType.ACTIVATIONS, output, 'c');
    }
    return workspaceMgr.leverageTo(ArrayType.ACTIVATION_GRAD, output.permute(permutationIndices));
}
Example #19
Source File: FrozenLayerWithBackprop.java From deeplearning4j with Apache License 2.0
@Override
public void fit(INDArray data, LayerWorkspaceMgr workspaceMgr) {
    if (!logFit) {
        OneTimeLogger.info(log, "Frozen layers cannot be fit, but backpropagation will continue. Warning will be issued only once per instance");
        logFit = true;
    }
}
Example #20
Source File: BaseOutputLayer.java From deeplearning4j with Apache License 2.0
/**
 * Returns the predictions for each example in the dataset
 * @param input the matrix to predict
 * @return the prediction for the dataset
 */
@Override
public int[] predict(INDArray input) {
    INDArray output = activate(input, false, LayerWorkspaceMgr.noWorkspacesImmutable());
    Preconditions.checkState(output.rank() == 2,
            "predict(INDArray) method can only be used on rank 2 output - got array with rank %s", output.rank());
    return output.argMax(1).toIntVector();
}
Example #21
Source File: Cropping3DLayer.java From deeplearning4j with Apache License 2.0
@Override
public Pair<Gradient, INDArray> backpropGradient(INDArray epsilon, LayerWorkspaceMgr workspaceMgr) {
    val inShape = input.shape();
    INDArray epsNext = workspaceMgr.create(ArrayType.ACTIVATION_GRAD, input.dataType(), inShape, 'c');
    INDArray epsNextSubset = inputSubset(epsNext);
    epsNextSubset.assign(epsilon);
    return new Pair<>((Gradient) new DefaultGradient(), epsNext);
}
Example #22
Source File: BidirectionalLayer.java From deeplearning4j with Apache License 2.0
@Override
public Pair<INDArray, MaskState> feedForwardMaskArray(INDArray maskArray, MaskState currentMaskState, int minibatchSize) {
    Pair<INDArray, MaskState> ret = fwd.feedForwardMaskArray(maskArray, currentMaskState, minibatchSize);
    bwd.feedForwardMaskArray(TimeSeriesUtils.reverseTimeSeriesMask(maskArray, LayerWorkspaceMgr.noWorkspaces(), ArrayType.INPUT), //TODO
            currentMaskState, minibatchSize);
    return ret;
}
Example #23
Source File: Subsampling1DLayer.java From deeplearning4j with Apache License 2.0
@Override
public INDArray activate(boolean training, LayerWorkspaceMgr workspaceMgr) {
    if (input.rank() != 3)
        throw new DL4JInvalidInputException("Got rank " + input.rank()
                + " array as input to Subsampling1DLayer with shape " + Arrays.toString(input.shape())
                + ". Expected rank 3 array with shape [minibatchSize, features, length]. " + layerId());

    // add singleton fourth dimension to input
    INDArray origInput = input;
    input = input.castTo(dataType).reshape(input.size(0), input.size(1), input.size(2), 1);

    // call 2D SubsamplingLayer's activate method
    INDArray acts = super.activate(training, workspaceMgr);

    // remove singleton fourth dimension from input and output activations
    input = origInput;
    acts = acts.reshape(acts.size(0), acts.size(1), acts.size(2));

    if (maskArray != null) {
        INDArray maskOut = feedForwardMaskArray(maskArray, MaskState.Active, (int) acts.size(0)).getFirst();
        Preconditions.checkState(acts.size(0) == maskOut.size(0) && acts.size(2) == maskOut.size(1),
                "Activations dimensions (0,2) and mask dimensions (0,1) don't match: Activations %s, Mask %s",
                acts.shape(), maskOut.shape());
        Broadcast.mul(acts, maskOut, acts, 0, 2);
    }

    return acts;
}
Example #24
Source File: FrozenLayerWithBackprop.java From deeplearning4j with Apache License 2.0
@Override
public void computeGradientAndScore(LayerWorkspaceMgr workspaceMgr) {
    if (!logGradient) {
        OneTimeLogger.info(log, "Gradients for the frozen layer are not set and will therefore not be updated. Warning will be issued only once per instance");
        logGradient = true;
    }
    underlying.score();
    //no op
}
Example #25
Source File: Cnn3DLossLayer.java From deeplearning4j with Apache License 2.0
@Override
public Pair<Gradient, INDArray> backpropGradient(INDArray epsilon, LayerWorkspaceMgr workspaceMgr) {
    assertInputSet(true);
    if (input.rank() != 5)
        throw new UnsupportedOperationException("Input is not rank 5. Got input with rank " + input.rank() + " " + layerId()
                + " with shape " + Arrays.toString(input.shape()) + " - expected shape [minibatch,channels,depth,height,width]");
    if (labels == null)
        throw new IllegalStateException("Labels are not set (null)");

    INDArray input2d = ConvolutionUtils.reshape5dTo2d(layerConf().getDataFormat(), input, workspaceMgr, ArrayType.FF_WORKING_MEM);
    INDArray labels2d = ConvolutionUtils.reshape5dTo2d(layerConf().getDataFormat(), labels, workspaceMgr, ArrayType.FF_WORKING_MEM);
    INDArray maskReshaped = ConvolutionUtils.reshapeCnn3dMask(layerConf().getDataFormat(), maskArray, labels, workspaceMgr, ArrayType.FF_WORKING_MEM);

    // delta calculation
    ILossFunction lossFunction = layerConf().getLossFn();
    INDArray delta2d = lossFunction.computeGradient(labels2d, input2d.dup(input2d.ordering()), layerConf().getActivationFn(), maskReshaped);
    delta2d = workspaceMgr.leverageTo(ArrayType.ACTIVATION_GRAD, delta2d);

    long n = input.size(0);
    long d, h, w, c;
    if (layerConf().getDataFormat() == Convolution3D.DataFormat.NDHWC) {
        d = input.size(1);
        h = input.size(2);
        w = input.size(3);
        c = input.size(4);
    } else {
        d = input.size(2);
        h = input.size(3);
        w = input.size(4);
        c = input.size(1);
    }
    INDArray delta5d = ConvolutionUtils.reshape2dTo5d(layerConf().getDataFormat(), delta2d, n, d, h, w, c, workspaceMgr, ArrayType.ACTIVATION_GRAD);

    // grab the empty gradient
    Gradient gradient = new DefaultGradient();
    return new Pair<>(gradient, delta5d);
}
Example #26
Source File: FrozenLayer.java From deeplearning4j with Apache License 2.0
@Override
public void fit(INDArray data, LayerWorkspaceMgr workspaceMgr) {
    if (!logFit) {
        OneTimeLogger.info(log, "Frozen layers cannot be fit. Warning will be issued only once per instance");
        logFit = true;
    }
}
Example #27
Source File: SimpleRnn.java From deeplearning4j with Apache License 2.0
@Override
public INDArray rnnTimeStep(INDArray input, LayerWorkspaceMgr workspaceMgr) {
    setInput(input, workspaceMgr);
    INDArray last = stateMap.get(STATE_KEY_PREV_ACTIVATION);
    INDArray out = activateHelper(last, false, false, workspaceMgr).getFirst();
    try (MemoryWorkspace ws = Nd4j.getWorkspaceManager().scopeOutOfWorkspaces()) {
        stateMap.put(STATE_KEY_PREV_ACTIVATION, out.get(all(), all(), point(out.size(2) - 1)).dup());
    }
    return out;
}
Example #28
Source File: LocalResponseTest.java From deeplearning4j with Apache License 2.0
@Before
public void doBefore() {
    NeuralNetConfiguration conf = new NeuralNetConfiguration.Builder()
            .gradientNormalization(GradientNormalization.RenormalizeL2PerLayer)
            .seed(123)
            .layer(new LocalResponseNormalization.Builder().k(2).n(5).alpha(1e-4).beta(0.75).build())
            .build();

    layer = new LocalResponseNormalization().instantiate(conf, null, 0, null, false, Nd4j.defaultFloatingPointType());
    activationsActual = layer.activate(x, false, LayerWorkspaceMgr.noWorkspaces());
}
Example #29
Source File: ReshapeVertex.java From deeplearning4j with Apache License 2.0
@Override
public Pair<Gradient, INDArray[]> doBackward(boolean tbptt, LayerWorkspaceMgr workspaceMgr) {
    if (!canDoBackward())
        throw new IllegalStateException("Cannot do backward pass: errors not set");

    INDArray[] out = new INDArray[1];
    out[0] = workspaceMgr.dup(ArrayType.ACTIVATION_GRAD, epsilon.reshape(order, inputs[0].shape()));
    return new Pair<>(null, out);
}
Example #30
Source File: BaseLayer.java From deeplearning4j with Apache License 2.0
@Override
public void computeGradientAndScore(LayerWorkspaceMgr workspaceMgr) {
    if (this.input == null)
        return;

    INDArray output = activate(true, workspaceMgr);
    setScoreWithZ(output);
}