Java Code Examples for org.nd4j.linalg.factory.Nd4j#exec()
The following examples show how to use org.nd4j.linalg.factory.Nd4j#exec().
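At its core, Nd4j.exec(...) runs an ND4J operation and returns its output: the exec(Op) overload returns a single INDArray, while the exec(CustomOp) overload returns an INDArray[] holding all of the op's outputs. Below is a minimal sketch of both call patterns, using the Abs and Triu ops that also appear in the examples that follow (values are illustrative):

import org.nd4j.linalg.api.ndarray.INDArray;
import org.nd4j.linalg.api.ops.custom.Triu;
import org.nd4j.linalg.api.ops.impl.transforms.same.Abs;
import org.nd4j.linalg.factory.Nd4j;

INDArray x = Nd4j.createFromArray(new double[]{-1.0, 2.0, -3.0});

// exec(Op): the single output array is returned directly
INDArray abs = Nd4j.exec(new Abs(x));                    // [1.0, 2.0, 3.0]

// exec(CustomOp): all outputs are returned as an INDArray[]
INDArray[] outs = Nd4j.exec(new Triu(Nd4j.rand(3, 3), 0));
INDArray upper = outs[0];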
Example 1
Source File: AMSGradUpdater.java From deeplearning4j with Apache License 2.0
@Override
public void applyUpdater(INDArray gradient, int iteration, int epoch) {
    if (m == null || v == null || vHat == null)
        throw new IllegalStateException("Updater has not been initialized with view state");

    double beta1 = config.getBeta1();
    double beta2 = config.getBeta2();
    double learningRate = config.getLearningRate(iteration, epoch);
    double epsilon = config.getEpsilon();

    //m_t = b_1 * m_{t-1} + (1-b_1) * g_t       eq 1 pg 3
    //v_t = b_2 * v_{t-1} + (1-b_2) * (g_t)^2   eq 1 pg 3
    //vHat_t = max(vHat_{t-1}, v_t)
    //gradient array contains: sqrt(vHat) + eps
    //gradient = alphat * m_t / (sqrt(vHat) + eps)
    Nd4j.exec(new AmsGradUpdater(gradient, v, m, vHat, learningRate, beta1, beta2, epsilon, iteration));
}
Example 2
Source File: RandomTests.java From deeplearning4j with Apache License 2.0
@Test
public void testBernoulli() {
    Nd4j.getRandom().setSeed(12345);
    INDArray arr = Nd4j.create(DataType.DOUBLE, 100);
    Nd4j.exec(new BernoulliDistribution(arr, 0.5));

    double sum = arr.sumNumber().doubleValue();
    assertTrue(String.valueOf(sum), sum > 0.0 && sum < 100.0);
}
Example 3
Source File: CustomOpsTests.java From deeplearning4j with Apache License 2.0
@Test
public void testCompareAndBitpack() {
    INDArray in = Nd4j.createFromArray(new double[]{
            -12.f, -11.f, -10.f, -9.f, -8.f, -7.f, -6.f, -5.f, -4.f, -3.f, -2.f, -1.f,
            0.f, 1.f, 2.f, 3.f, 4.f, 5.f, 6.f, 7.f, 8.f, 9.f, 10.f, 11.f}).reshape(2, 3, 4);
    INDArray out = Nd4j.createUninitialized(DataType.UBYTE, 2, 3, 4);
    INDArray expected = Nd4j.createFromArray(new byte[]{
            0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
            0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1}).reshape(2, 3, 4);

    Nd4j.exec(new CompareAndBitpack(in, 2.0, out));
    assertArrayEquals(new long[]{2, 3, 4}, out.shape());
}
Example 4
Source File: BaseNDArray.java From deeplearning4j with Apache License 2.0
@Override
public INDArray gt(INDArray other) {
    validateNumericalArray("greater than (gt)", false);
    if (Shape.shapeEquals(this.shape(), other.shape())) {
        return Nd4j.getExecutioner().exec(new GreaterThan(this, other,
                Nd4j.createUninitialized(DataType.BOOL, this.shape(), this.ordering())))[0];
    } else if (Shape.areShapesBroadcastable(this.shape(), other.shape())) {
        return Nd4j.exec(new GreaterThan(new INDArray[]{this, other},
                new INDArray[]{Nd4j.createUninitialized(DataType.BOOL,
                        Shape.broadcastOutputShape(this.shape(), other.shape()))}))[0];
    } else {
        throw new IllegalArgumentException("Shapes must be broadcastable");
    }
}
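A short usage sketch for gt; here the shapes are equal, so the element-wise branch applies (arrays are illustrative):

INDArray a = Nd4j.createFromArray(new double[]{1, 5, 3});
INDArray b = Nd4j.createFromArray(new double[]{2, 2, 2});

// Element-wise comparison yields a BOOL array: [false, true, true]
INDArray mask = a.gt(b);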
Example 5
Source File: CustomOpsTests.java From deeplearning4j with Apache License 2.0
@Test
public void isMax4d_2dims() {
    Nd4j.getRandom().setSeed(12345);
    INDArray in = Nd4j.rand(DataType.FLOAT, 3, 3, 4, 4).permute(0, 2, 3, 1);

    INDArray out_permutedIn = in.like();
    INDArray out_dupedIn = in.like();

    Nd4j.exec(new IsMax(in.dup(), out_dupedIn, 2, 3));
    Nd4j.exec(new IsMax(in, out_permutedIn, 2, 3));

    assertEquals(out_dupedIn, out_permutedIn);
}
Example 6
Source File: UpdaterValidation.java From deeplearning4j with Apache License 2.0
@Test
public void testAmsGradUpdater() {
    double lr = 1e-3;
    double beta1 = 0.9;
    double beta2 = 0.999;
    double eps = 1e-8;

    INDArray m = Nd4j.zeros(DataType.DOUBLE, 1, 5);
    INDArray v = Nd4j.zeros(DataType.DOUBLE, 1, 5);
    INDArray vH = Nd4j.zeros(DataType.DOUBLE, 1, 5);

    Map<String, INDArray> state = new HashMap<>();
    state.put("M", m.dup());
    state.put("V", v.dup());
    state.put("V_HAT", vH.dup());
    AMSGradUpdater u = (AMSGradUpdater) new AMSGrad(lr, beta1, beta2, eps).instantiate(state, true);

    assertEquals(m, state.get("M"));
    assertEquals(v, state.get("V"));
    assertEquals(vH, state.get("V_HAT"));

    for (int i = 0; i < 3; i++) {
        INDArray g1 = Nd4j.linspace(DataType.DOUBLE, 1, 5, 1).reshape(1, 5);
        INDArray g2 = g1.dup();
        val g3 = g1.dup();
        val mu = m.dup();
        val vu = v.dup();
        val hu = vH.dup();

        UpdaterJavaCode.applyAmsGradUpdater(g1, m, v, vH, lr, beta1, beta2, eps, i);
        u.applyUpdater(g2, i, 0);
        Nd4j.exec(new AmsGradUpdater(g3, vu, mu, hu, lr, beta1, beta2, eps, i));

        assertEquals(m, state.get("M"));
        assertEquals(v, state.get("V"));
        assertEquals(vH, state.get("V_HAT"));
        assertEquals(g1, g2);

        assertEquals(m, mu);
        assertEquals(v, vu);
        assertEquals(vH, hu);
        assertEquals(g1, g3);
    }
}
Example 7
Source File: ReductionOpValidation.java From deeplearning4j with Apache License 2.0
@Test
public void testMultiHeadedDotProductAttentionWeirdInputs() {
    final INDArray k = Nd4j.rand(new int[]{10, 4, 5});
    final INDArray v = Nd4j.rand(new int[]{10, 4, 5});
    final INDArray q = Nd4j.rand(new int[]{10, 4, 2});

    final INDArray Wk = Nd4j.rand(new int[]{2, 3, 4});
    final INDArray Wv = Nd4j.rand(new int[]{2, 3, 4});
    final INDArray Wq = Nd4j.rand(new int[]{2, 3, 4});
    final INDArray Wo = Nd4j.rand(new int[]{2 * 3, 8});

    final INDArray mask = Nd4j.rand(10, 5).gte(0.2).castTo(DataType.DOUBLE);

    final INDArray kP = Nd4j.tensorMmul(k, Wk, new int[][]{{1}, {2}}).permutei(0, 2, 3, 1);
    final INDArray vP = Nd4j.tensorMmul(v, Wv, new int[][]{{1}, {2}}).permutei(0, 2, 3, 1);
    final INDArray qP = Nd4j.tensorMmul(q, Wq, new int[][]{{1}, {2}}).permutei(0, 2, 3, 1);

    final DynamicCustomOp dot_product_attention = DynamicCustomOp.builder("dot_product_attention")
            .addInputs(qP, kP, vP, mask)
            .addIntegerArguments(1, 0)
            .build();

    final INDArray[] outputs = Nd4j.exec(dot_product_attention);
    final INDArray attOut = outputs[0].permutei(0, 3, 1, 2)
            .reshape(k.size(0), q.size(2), Wv.size(0) * Wv.size(1));

    final INDArray out = Nd4j.tensorMmul(attOut, Wo, new int[][]{{2}, {0}}).permutei(0, 2, 1);
    final INDArray finalOut = out.norm2();

    for (char orderWeights : new char[]{'f', 'c'}) {
        for (char orderInput : new char[]{'f', 'c'}) {
            log.info("-*- Starting Test: input Order = {}, weightOrder = {} -*-", orderInput, orderWeights);

            SameDiff sd = SameDiff.create();
            SDVariable sdQ = sd.var("q", q.dup(orderInput));
            SDVariable sdK = sd.var("k", k.dup(orderInput));
            SDVariable sdV = sd.var("v", v.dup(orderInput));
            SDVariable sdWq = sd.var("Wq", Wq.dup(orderWeights));
            SDVariable sdWk = sd.var("Wk", Wk.dup(orderWeights));
            SDVariable sdWv = sd.var("Wv", Wv.dup(orderWeights));
            SDVariable sdWo = sd.var("Wo", Wo.dup(orderWeights));
            SDVariable sdMask = sd.constant("mask", mask);

            SDVariable t = sd.nn.multiHeadDotProductAttention(sdQ, sdK, sdV, sdWq, sdWk, sdWv, sdWo, sdMask, true);
            t.norm2("out");

            String err = OpValidation.validate(new TestCase(sd)
                    .expectedOutput("out", finalOut)
                    .gradientCheck(false)
                    .gradCheckSkipVariables("mask"));

            assertNull(err);
        }
    }
}
Example 8
Source File: MKLDNNBatchNormHelper.java From deeplearning4j with Apache License 2.0
@Override
public INDArray preOutput(INDArray x, boolean training, long[] shape, INDArray gamma, INDArray beta,
                          INDArray mean, INDArray var, double decay, double eps,
                          CNN2DFormat format, LayerWorkspaceMgr workspaceMgr) {
    if (x.dataType() != DataType.FLOAT)
        return null;    //MKL-DNN only supports float

    int axis = (x.rank() != 4 || format == CNN2DFormat.NCHW) ? 1 : 3;

    if (context == null) {
        context = Nd4j.getExecutioner().buildContext();
        context.setIArguments(
                ArrayUtil.fromBoolean(gamma != null),
                ArrayUtil.fromBoolean(beta != null),
                axis);   //Axis: 1 = NCHW, 3 = NHWC
        context.setTArguments(eps);
    }

    //Mean and variance: args here are *global*. Depending on train/test mode we might need to use batch mean/var
    INDArray m, v;
    if (training) {
        if (meanCache == null) {
            try (MemoryWorkspace ws = Nd4j.getMemoryManager().scopeOutOfWorkspaces()) {
                meanCache = Nd4j.createUninitialized(x.dataType(), x.size(axis));
                varCache = Nd4j.createUninitialized(x.dataType(), x.size(axis));
            }
        }

        int[] dims;
        if (x.rank() == 2) {
            dims = RANK2_DIMS;
        } else if (format == CNN2DFormat.NCHW) {
            dims = RANK4_DIMS_NCHW;
        } else {
            dims = RANK4_DIMS_NHWC;
        }

        x.mean(meanCache, dims);
        Nd4j.exec(new Variance(x, varCache, false, dims));

        m = meanCache;
        v = varCache;
    } else {
        m = mean.reshape(mean.length());
        v = var.reshape(var.length());
    }

    //Note: batchnorm op expects rank 1 inputs for mean/var etc, not rank 2 shape [1,x]
    context.purge();
    context.setInputArray(0, x);
    context.setInputArray(1, m);
    context.setInputArray(2, v);
    if (gamma != null && beta != null) {
        context.setInputArray(3, gamma.rank() == 2 ? gamma.reshape(gamma.length()) : gamma);
        context.setInputArray(4, beta.rank() == 2 ? beta.reshape(beta.length()) : beta);
    }

    INDArray out = workspaceMgr.createUninitialized(ArrayType.ACTIVATIONS, x.dataType(), x.shape());
    context.setOutputArray(0, out);

    BatchNorm bn = new BatchNorm();
    Nd4j.exec(bn, context);
    return out;
}
Example 9
Source File: NDLoss.java From deeplearning4j with Apache License 2.0
/**
 * Mean squared error loss function. Implements {@code (label[i] - prediction[i])^2} - i.e., squared error on a per-element basis.<br>
 * When averaged (using {@link LossReduce#MEAN_BY_WEIGHT} or {@link LossReduce#MEAN_BY_NONZERO_WEIGHT_COUNT} (the default))<br>
 * this is the mean squared error loss function.<br>
 *
 * @param label Label array (NUMERIC type)
 * @param predictions Predictions array (NUMERIC type)
 * @param weights Weights array. May be null. If null, a weight of 1.0 is used (NUMERIC type)
 * @param lossReduce Reduction type for the loss. See {@link LossReduce} for more details. Default: {@link LossReduce#MEAN_BY_NONZERO_WEIGHT_COUNT}
 * @return output Loss variable (NUMERIC type)
 */
public INDArray meanSquaredError(INDArray label, INDArray predictions, INDArray weights,
        LossReduce lossReduce) {
    NDValidation.validateNumerical("meanSquaredError", "label", label);
    NDValidation.validateNumerical("meanSquaredError", "predictions", predictions);
    NDValidation.validateNumerical("meanSquaredError", "weights", weights);
    return Nd4j.exec(new org.nd4j.linalg.api.ops.impl.loss.MeanSquaredErrorLoss(label, predictions, weights, lossReduce))[0];
}
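A small worked example, calling the underlying op directly just as the method above does. The import path shown for LossReduce (org.nd4j.autodiff.loss) is an assumption based on recent releases:

import org.nd4j.autodiff.loss.LossReduce;
import org.nd4j.linalg.api.ops.impl.loss.MeanSquaredErrorLoss;

INDArray label = Nd4j.createFromArray(new double[]{1.0, 2.0, 3.0});
INDArray pred = Nd4j.createFromArray(new double[]{1.5, 2.0, 2.0});
INDArray w = Nd4j.createFromArray(new double[]{1.0, 1.0, 1.0});

// Squared errors are [0.25, 0.0, 1.0]; the default reduction averages them to ~0.4167
INDArray loss = Nd4j.exec(new MeanSquaredErrorLoss(label, pred, w,
        LossReduce.MEAN_BY_NONZERO_WEIGHT_COUNT))[0];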
Example 10
Source File: NDMath.java From deeplearning4j with Apache License 2.0
/**
 * Manhattan distance (l1 norm, l1 distance) reduction operation. The output contains the Manhattan distance for each<br>
 * tensor/subset along the specified dimensions:<br>
 * out = sum_i abs(x[i]-y[i])<br>
 *
 * @param x Input variable x (NUMERIC type)
 * @param y Input variable y (NUMERIC type)
 * @param dimensions Dimensions to calculate manhattanDistance over (Size: AtLeast(min=0))
 * @return output Output variable (NUMERIC type)
 */
public INDArray manhattanDistance(INDArray x, INDArray y, int... dimensions) {
    NDValidation.validateNumerical("manhattanDistance", "x", x);
    NDValidation.validateNumerical("manhattanDistance", "y", y);
    Preconditions.checkArgument(dimensions.length >= 0, "dimensions has incorrect size/length. Expected: dimensions.length >= 0, got %s", dimensions.length);
    return Nd4j.exec(new org.nd4j.linalg.api.ops.impl.reduce3.ManhattanDistance(x, y, dimensions));
}
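A quick worked example of the reduction; reducing a rank-1 input over dimension 0 yields a scalar (values are illustrative):

import org.nd4j.linalg.api.ops.impl.reduce3.ManhattanDistance;

INDArray x = Nd4j.createFromArray(new double[]{1, 2, 3});
INDArray y = Nd4j.createFromArray(new double[]{2, 4, 6});

// |1-2| + |2-4| + |3-6| = 6
INDArray d = Nd4j.exec(new ManhattanDistance(x, y, 0));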
Example 11
Source File: NDBase.java From deeplearning4j with Apache License 2.0
/**
 * Returns a count of the number of elements that satisfy the condition (for each slice along the specified dimensions)<br>
 *
 * Note that if keepDims = true, the output variable has the same rank as the input variable,<br>
 * with the reduced dimensions having size 1. This can be useful for later broadcast operations (such as subtracting<br>
 * the mean along a dimension).<br>
 * Example: if input has shape [a,b,c] and dimensions=[1] then output has shape:<br>
 * keepDims = true: [a,1,c]<br>
 * keepDims = false: [a,c]<br>
 *
 * @param in Input variable (NUMERIC type)
 * @param condition Condition
 * @param keepDim If true: keep the dimensions that are reduced on (as size 1). False: remove the reduction dimensions
 * @param dimensions Dimensions to reduce over. If dimensions are not specified, full array reduction is performed (Size: AtLeast(min=0))
 * @return output Number of elements that the condition is satisfied for (NUMERIC type)
 */
public INDArray matchConditionCount(INDArray in, Condition condition, boolean keepDim, int... dimensions) {
    NDValidation.validateNumerical("matchConditionCount", "in", in);
    Preconditions.checkArgument(dimensions.length >= 0, "dimensions has incorrect size/length. Expected: dimensions.length >= 0, got %s", dimensions.length);
    return Nd4j.exec(new org.nd4j.linalg.api.ops.impl.reduce.longer.MatchCondition(in, condition, keepDim, dimensions));
}
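A minimal usage sketch; with no dimensions supplied, the count is taken over the full array:

import org.nd4j.linalg.api.ops.impl.reduce.longer.MatchCondition;
import org.nd4j.linalg.indexing.conditions.Conditions;

INDArray in = Nd4j.createFromArray(new double[]{-1, 0, 2, 5});

// Two elements satisfy "greater than 0"
INDArray count = Nd4j.exec(new MatchCondition(in, Conditions.greaterThan(0.0), false));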
Example 12
Source File: NDMath.java From deeplearning4j with Apache License 2.0
/**
 * Scalar reverse subtraction operation, out = scalar - in<br>
 *
 * @param x Input variable (NUMERIC type)
 * @param value Scalar value for op
 * @return output Output variable (NUMERIC type)
 */
public INDArray rsub(INDArray x, double value) {
    NDValidation.validateNumerical("rsub", "x", x);
    return Nd4j.exec(new org.nd4j.linalg.api.ops.impl.scalar.ScalarReverseSubtraction(x, value));
}
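For example (a sketch; values are illustrative):

import org.nd4j.linalg.api.ops.impl.scalar.ScalarReverseSubtraction;

INDArray x = Nd4j.createFromArray(new double[]{0.25, 0.5, 0.75});

// out = 1.0 - x = [0.75, 0.5, 0.25]
INDArray out = Nd4j.exec(new ScalarReverseSubtraction(x, 1.0));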
Example 13
Source File: NDBase.java From deeplearning4j with Apache License 2.0
/**
 * Norm1 (L1 norm) reduction operation: The output contains the L1 norm for each tensor/subset along the specified dimensions:<br>
 * out = sum_i abs(x[i])<br>
 *
 * Note that if keepDims = true, the output variable has the same rank as the input variable,<br>
 * with the reduced dimensions having size 1. This can be useful for later broadcast operations (such as subtracting<br>
 * the mean along a dimension).<br>
 * Example: if input has shape [a,b,c] and dimensions=[1] then output has shape:<br>
 * keepDims = true: [a,1,c]<br>
 * keepDims = false: [a,c]<br>
 *
 * @param x Input variable (NUMERIC type)
 * @param keepDims If true: keep the dimensions that are reduced on (as size 1). False: remove the reduction dimensions
 * @param dimensions dimensions to reduce over (Size: AtLeast(min=0))
 * @return output Output variable (NUMERIC type)
 */
public INDArray norm1(INDArray x, boolean keepDims, int... dimensions) {
    NDValidation.validateNumerical("norm1", "x", x);
    Preconditions.checkArgument(dimensions.length >= 0, "dimensions has incorrect size/length. Expected: dimensions.length >= 0, got %s", dimensions.length);
    return Nd4j.exec(new org.nd4j.linalg.api.ops.impl.reduce.floating.Norm1(x, keepDims, dimensions));
}
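A short sketch showing the effect of keepDims on the output shape (values are illustrative):

import org.nd4j.linalg.api.ops.impl.reduce.floating.Norm1;

INDArray x = Nd4j.createFromArray(new double[]{-1, 2, -3, 4}).reshape(2, 2);

// L1 norm of each row: [3.0, 7.0]
INDArray rowNorms = Nd4j.exec(new Norm1(x, false, 1));   // shape [2]
INDArray kept = Nd4j.exec(new Norm1(x, true, 1));        // shape [2, 1]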
Example 14
Source File: NDLinalg.java From deeplearning4j with Apache License 2.0
/**
 * Upper triangle of an array. Returns a copy of the input tensor with the elements below the k-th diagonal zeroed.<br>
 *
 * @param input (NUMERIC type)
 * @return output (FLOATING_POINT type)
 */
public INDArray triu(INDArray input) {
    NDValidation.validateNumerical("triu", "input", input);
    return Nd4j.exec(new org.nd4j.linalg.api.ops.custom.Triu(input, 0))[0];
}
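A minimal usage sketch; k = 0 zeroes everything below the main diagonal:

import org.nd4j.linalg.api.ops.custom.Triu;

INDArray m = Nd4j.createFromArray(new double[]{
        1, 2, 3,
        4, 5, 6,
        7, 8, 9}).reshape(3, 3);

// Result:
// [[1, 2, 3],
//  [0, 5, 6],
//  [0, 0, 9]]
INDArray upper = Nd4j.exec(new Triu(m, 0))[0];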
Example 15
Source File: NDLinalg.java From deeplearning4j with Apache License 2.0
/**
 * Computes LU decomposition.<br>
 *
 * @param input input tensor (NUMERIC type)
 * @return output (FLOATING_POINT type)
 */
public INDArray lu(INDArray input) {
    NDValidation.validateNumerical("Lu", "input", input);
    return Nd4j.exec(new org.nd4j.linalg.api.ops.custom.Lu(input))[0];
}
Example 16
Source File: NDMath.java From deeplearning4j with Apache License 2.0
/**
 * Compute the 2d confusion matrix of size [numClasses, numClasses] from a pair of labels and predictions, both of<br>
 * which are represented as integer values.<br>
 * For example, if labels = [0, 1, 1], predicted = [0, 2, 1], and numClasses=4 then output is:<br>
 * [1, 0, 0, 0]<br>
 * [0, 1, 1, 0]<br>
 * [0, 0, 0, 0]<br>
 * [0, 0, 0, 0]<br>
 *
 * @param labels Labels - 1D array of integer values representing label values (NUMERIC type)
 * @param pred Predictions - 1D array of integer values representing predictions. Same length as labels (NUMERIC type)
 * @param numClasses Number of classes
 * @return output variable (2D, shape [numClasses, numClasses]) (NUMERIC type)
 */
public INDArray confusionMatrix(INDArray labels, INDArray pred, int numClasses) {
    NDValidation.validateNumerical("confusionMatrix", "labels", labels);
    NDValidation.validateNumerical("confusionMatrix", "pred", pred);
    return Nd4j.exec(new org.nd4j.linalg.api.ops.impl.shape.ConfusionMatrix(labels, pred, numClasses))[0];
}
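A sketch that reproduces the worked example from the Javadoc above:

import org.nd4j.linalg.api.ops.impl.shape.ConfusionMatrix;

INDArray labels = Nd4j.createFromArray(new int[]{0, 1, 1});
INDArray pred = Nd4j.createFromArray(new int[]{0, 2, 1});

// 4x4 matrix with rows indexed by label value, columns by predicted value
INDArray cm = Nd4j.exec(new ConfusionMatrix(labels, pred, 4))[0];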
Example 17
Source File: NDMath.java From deeplearning4j with Apache License 2.0
/**
 * Returns the pair-wise cross product of equal size arrays a and b: a x b = ||a||x||b|| sin(theta).<br>
 * Can take rank 1 or above inputs (of equal shapes), but note that the last dimension must have size 3<br>
 *
 * @param a First input (NUMERIC type)
 * @param b Second input (NUMERIC type)
 * @return output Element-wise cross product (NUMERIC type)
 */
public INDArray cross(INDArray a, INDArray b) {
    NDValidation.validateNumerical("cross", "a", a);
    NDValidation.validateNumerical("cross", "b", b);
    return Nd4j.exec(new org.nd4j.linalg.api.ops.impl.shape.Cross(a, b))[0];
}
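A minimal sketch with the standard basis vectors (the last dimension has size 3, as required):

import org.nd4j.linalg.api.ops.impl.shape.Cross;

INDArray a = Nd4j.createFromArray(new double[]{1, 0, 0});
INDArray b = Nd4j.createFromArray(new double[]{0, 1, 0});

// Unit x cross unit y = unit z: [0, 0, 1]
INDArray c = Nd4j.exec(new Cross(a, b))[0];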
Example 18
Source File: NDMath.java From deeplearning4j with Apache License 2.0
/**
 * Elementwise absolute value operation: out = abs(x)<br>
 *
 * @param x Input variable (NUMERIC type)
 * @return output Output variable (NUMERIC type)
 */
public INDArray abs(INDArray x) {
    NDValidation.validateNumerical("abs", "x", x);
    return Nd4j.exec(new org.nd4j.linalg.api.ops.impl.transforms.same.Abs(x));
}
Example 19
Source File: NDLinalg.java From deeplearning4j with Apache License 2.0
/**
 * Solver for systems of linear equations.<br>
 *
 * @param matrix input tensor (NUMERIC type)
 * @param rhs input tensor (NUMERIC type)
 * @param adjoint adjoint mode, defaults to False
 * @return output Output tensor (FLOATING_POINT type)
 */
public INDArray solve(INDArray matrix, INDArray rhs, boolean adjoint) {
    NDValidation.validateNumerical("Solve", "matrix", matrix);
    NDValidation.validateNumerical("Solve", "rhs", rhs);
    return Nd4j.exec(new org.nd4j.linalg.api.ops.custom.LinearSolve(matrix, rhs, adjoint))[0];
}
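A small worked example, calling the op directly as the method above does:

import org.nd4j.linalg.api.ops.custom.LinearSolve;

// Solve A * x = b for A = [[1, 2], [3, 4]] and b = [5, 11]; the solution is x = [1, 2]
INDArray a = Nd4j.createFromArray(new double[]{1, 2, 3, 4}).reshape(2, 2);
INDArray b = Nd4j.createFromArray(new double[]{5, 11}).reshape(2, 1);

INDArray x = Nd4j.exec(new LinearSolve(a, b, false))[0];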
Example 20
Source File: NDMath.java From deeplearning4j with Apache License 2.0
/**
 * Rectified tanh operation: max(0, tanh(in))<br>
 *
 * @param x Input variable (NUMERIC type)
 * @return output Output variable (NUMERIC type)
 */
public INDArray rectifiedTanh(INDArray x) {
    NDValidation.validateNumerical("rectifiedTanh", "x", x);
    return Nd4j.exec(new org.nd4j.linalg.api.ops.impl.transforms.strict.RectifiedTanh(x));
}