Java Code Examples for org.nd4j.linalg.api.ndarray.INDArray#addi()
The following examples show how to use
org.nd4j.linalg.api.ndarray.INDArray#addi().
Each example notes its source file, originating project, and license.
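As background: addi() is the in-place counterpart of add(). It writes the sum into the array it is called on (and returns that same array), whereas add() allocates a new array and leaves the receiver untouched. A minimal sketch (values are illustrative):

    import org.nd4j.linalg.api.ndarray.INDArray;
    import org.nd4j.linalg.factory.Nd4j;

    public class AddiDemo {
        public static void main(String[] args) {
            INDArray a = Nd4j.ones(2, 2);
            INDArray b = Nd4j.ones(2, 2);

            INDArray sum = a.add(b); // new array; 'a' is unchanged
            a.addi(b);               // accumulates into 'a' and returns 'a'

            System.out.println(sum); // all 2.0
            System.out.println(a);   // all 2.0 -- mutated in place
        }
    }

The in-place form avoids an allocation, which is why it shows up in the accumulation loops and workspace-bound code in the examples below.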
Example 1
Source File: LongTests.java From nd4j with Apache License 2.0
    @Test
    public void testSomething2() {
        // we create a 2D array; the total number of elements is 2.4B, > MAX_INT
        INDArray huge = Nd4j.create(8000000, 300);

        // we apply element-wise scalar ops, just to make sure stuff still works
        huge.subi(0.5).divi(2);

        // now we're checking different rows; they should NOT be equal
        INDArray row0 = huge.getRow(73).assign(1.0);
        INDArray row1 = huge.getRow(74).assign(2.0);
        assertNotEquals(row0, row1);

        // same idea, but this code is broken: rowA and rowB will be pointing to the same offset
        INDArray rowA = huge.getRow(huge.rows() - 3);
        INDArray rowB = huge.getRow(huge.rows() - 10);

        // safety check, to see if we're really working on the same offset.
        rowA.addi(1.0);

        // and this fails, so rowA and rowB are pointing to the same offset, even though different getRow() arguments were used
        assertNotEquals(rowA, rowB);
    }
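For context on why the row assertions above work at all: getRow() returns a view over the parent array's buffer, so assign() and addi() on a row write straight through to the parent. A minimal sketch of the view semantics, using the same imports as the sketch above:

    INDArray m = Nd4j.zeros(3, 3);
    INDArray row = m.getRow(1); // a view, not a copy
    row.addi(5.0);              // writes through to m
    System.out.println(m);      // row 1 is now all 5.0, the rest still 0.0

The test exploits this to detect integer overflow in offset calculations: if two distinct row indices map to the same (overflowed) offset, mutating one "row" mutates the other.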
Example 2
Source File: HalfOpsTests.java From nd4j with Apache License 2.0
    @Test
    public void testMetaOp5() throws Exception {
        INDArray exp1 = Nd4j.create(500, 500).assign(3.0f);
        INDArray exp2 = Nd4j.create(500, 500).assign(6.0f);
        INDArray exp3 = Nd4j.create(500, 500).assign(2.0f);

        INDArray array = Nd4j.ones(500, 500);
        INDArray param = Nd4j.ones(500, 500); //.reshape('f',500, 500);

        INDArray am = param.mul(2);

        assertEquals(0, ((CudaGridExecutioner) Nd4j.getExecutioner()).getQueueLength());

        array.addi(am);
        array.divi(0.5f);

        Nd4j.getExecutioner().commit();
        Thread.sleep(1000);

        assertArrayEquals(exp3.data().asFloat(), am.data().asFloat(), 0.001f);
        assertArrayEquals(exp2.data().asFloat(), array.data().asFloat(), 0.001f);
    }
Example 3
Source File: EndlessWorkspaceTests.java From deeplearning4j with Apache License 2.0
    /**
     * This test checks for allocations within a single workspace, without any spills
     */
    @Test
    public void endlessTest1() {
        Nd4j.getWorkspaceManager().setDefaultWorkspaceConfiguration(
                WorkspaceConfiguration.builder().initialSize(100 * 1024L * 1024L).build());
        Nd4j.getMemoryManager().togglePeriodicGc(false);

        AtomicLong counter = new AtomicLong(0);
        while (true) {
            try (MemoryWorkspace workspace = Nd4j.getWorkspaceManager().getAndActivateWorkspace()) {
                long time1 = System.nanoTime();
                INDArray array = Nd4j.create(1024 * 1024);
                long time2 = System.nanoTime();

                array.addi(1.0f);
                assertEquals(1.0f, array.meanNumber().floatValue(), 0.1f);

                if (counter.incrementAndGet() % 1000 == 0)
                    log.info("{} iterations passed... Allocation time: {} ns", counter.get(), time2 - time1);
            }
        }
    }
Example 4
Source File: UpdaterTest.java From deeplearning4j with Apache License 2.0
    @Test
    public void testNadam() {
        int rows = 10;
        int cols = 2;

        NadamUpdater grad = new NadamUpdater(new Nadam());
        grad.setStateViewArray(Nd4j.zeros(1, 2 * rows * cols), new long[] {rows, cols}, 'c', true);
        INDArray W = Nd4j.zeros(rows, cols);
        Distribution dist = Nd4j.getDistributions().createNormal(1e-3, 1e-3);
        for (int i = 0; i < W.rows(); i++)
            W.putRow(i, Nd4j.create(dist.sample(W.columns())));

        for (int i = 0; i < 5; i++) {
            // String learningRates = String.valueOf("\nAdamUpdater\n " + grad.applyUpdater(W, i)).replaceAll(";", "\n");
            // System.out.println(learningRates);
            W.addi(Nd4j.randn(rows, cols));
        }
    }
Example 5
Source File: Nd4jVector.java From jstarcraft-ai with Apache License 2.0
    @Override
    @Deprecated
    // TODO: to be merged with dotProduct
    public MathVector accumulateProduct(MathMatrix leftMatrix, boolean transpose, MathVector rightVector, MathCalculator mode) {
        if (leftMatrix instanceof Nd4jMatrix && rightVector instanceof Nd4jVector) {
            Nd4jEnvironmentThread thread = EnvironmentThread.getThread(Nd4jEnvironmentThread.class);
            try (MemoryWorkspace workspace = thread.getSpace()) {
                INDArray leftArray = transpose ? Nd4jMatrix.class.cast(leftMatrix).getArray().transpose()
                        : Nd4jMatrix.class.cast(leftMatrix).getArray();
                INDArray rightArray = Nd4jVector.class.cast(rightVector).getArray();
                INDArray dataArray = this.getArray();
                INDArray cacheArray = Nd4j.zeros(dataArray.shape(), dataArray.ordering());
                Nd4j.getBlasWrapper().gemv(one, leftArray, rightArray, zero, cacheArray);
                dataArray.addi(cacheArray);
                return this;
            }
        } else {
            return MathVector.super.accumulateProduct(leftMatrix, transpose, rightVector, mode);
        }
    }
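For context, BLAS gemv computes y = alpha * A * x + beta * y; with alpha = one and beta = zero as in the call above, cacheArray receives A * x, which addi() then folds into the running total. A minimal sketch of the same accumulate pattern using plain ND4J mmul in place of the BLAS wrapper (shapes are illustrative):

    INDArray A = Nd4j.rand(4, 3);
    INDArray x = Nd4j.rand(3, 1);
    INDArray acc = Nd4j.zeros(4, 1);
    acc.addi(A.mmul(x)); // acc += A * x, as the gemv call above does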
Example 6
Source File: TransformOpValidation.java From deeplearning4j with Apache License 2.0
    @Test
    public void testStandardizeNoDeviation() {
        final INDArray random = Nd4j.rand(new int[]{10, 4});
        for (int i = 0; i < 4; i++) {
            random.putScalar(1, i, 7);
        }

        final int[] axis = new int[]{1};
        final INDArray means = random.mean(axis);
        final INDArray std = random.std(false, axis);
        // where the deviation is zero, substitute 1 so the division below is a no-op
        std.addi(std.eq(0).castTo(DataType.DOUBLE));

        final INDArray res = random.subColumnVector(means).divColumnVector(std);
        final INDArray expOut = res.norm1();

        SameDiff sd = SameDiff.create();
        SDVariable sdA = sd.var("a", random);
        SDVariable t = sd.math.standardize(sdA, axis);
        t.norm1("out");

        String err = OpValidation.validate(new TestCase(sd)
                .expectedOutput("out", expOut)
                .gradientCheck(true));
        assertNull(err, err);
    }
Example 7
Source File: FeatureUtil.java From nd4j with Apache License 2.0
    public static void normalizeMatrix(INDArray toNormalize) {
        INDArray columnMeans = toNormalize.mean(0);
        toNormalize.subiRowVector(columnMeans);
        INDArray std = toNormalize.std(0);
        std.addi(Nd4j.scalar(1e-12)); // avoid division by zero for constant columns
        toNormalize.diviRowVector(std);
    }
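A quick usage sketch of the helper above (input shape and values are illustrative); the 1e-12 added to the per-column standard deviations guards against division by zero for constant columns:

    INDArray data = Nd4j.rand(100, 5).muli(10).addi(3); // arbitrary scale and offset
    FeatureUtil.normalizeMatrix(data);
    System.out.println(data.mean(0)); // ~0 per column
    System.out.println(data.std(0));  // ~1 per column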
Example 8
Source File: OldConvolution.java From deeplearning4j with Apache License 2.0
    /**
     * Rearrange matrix columns into blocks
     *
     * @param col the column-transposed image to convert
     * @param sy  stride y
     * @param sx  stride x
     * @param ph  padding height
     * @param pw  padding width
     * @param h   height
     * @param w   width
     * @return the image reassembled from the columns
     */
    public static INDArray col2im(INDArray col, int sy, int sx, int ph, int pw, int h, int w) {
        long n = col.size(0);    // number of images
        long c = col.size(1);    // number of channels
        long kh = col.size(2);   // kernel height
        long kw = col.size(3);   // kernel width
        long outH = col.size(4); // output height
        long outW = col.size(5); // output width

        INDArray img = Nd4j.create(n, c, h + 2 * ph + sy - 1, w + 2 * pw + sx - 1);
        for (int i = 0; i < kh; i++) { // iterate over the kernel rows
            long iLim = i + sy * outH;
            for (int j = 0; j < kw; j++) { // iterate over the kernel columns
                long jLim = j + sx * outW;
                INDArrayIndex[] indices = new INDArrayIndex[] {
                        NDArrayIndex.all(), NDArrayIndex.all(),
                        NDArrayIndex.interval(i, sy, iLim),
                        NDArrayIndex.interval(j, sx, jLim)};

                INDArray get = img.get(indices);
                INDArray colAdd = col.get(NDArrayIndex.all(), NDArrayIndex.all(), NDArrayIndex.point(i),
                        NDArrayIndex.point(j), NDArrayIndex.all(), NDArrayIndex.all());
                get.addi(colAdd);
                img.put(indices, get);
            }
        }

        // return the subset of the padded image relative to the height/width of the image and the padding width/height
        return img.get(NDArrayIndex.all(), NDArrayIndex.all(),
                NDArrayIndex.interval(ph, ph + h),
                NDArrayIndex.interval(pw, pw + w));
    }
Example 9
Source File: FeatureUtil.java From deeplearning4j with Apache License 2.0
    public static void normalizeMatrix(INDArray toNormalize) {
        INDArray columnMeans = toNormalize.mean(0);
        toNormalize.subiRowVector(columnMeans);
        INDArray std = toNormalize.std(0);
        std.addi(Nd4j.scalar(1e-12)); // avoid division by zero for constant columns
        toNormalize.diviRowVector(std);
    }
Example 10
Source File: ElementWiseMultiplicationLayer.java From deeplearning4j with Apache License 2.0
    @Override
    public Pair<Gradient, INDArray> backpropGradient(INDArray epsilon, LayerWorkspaceMgr workspaceMgr) {
        // If this layer is layer L, then epsilon for this layer is ((w^(L+1)*(delta^(L+1))^T))^T (or equivalent)
        INDArray z = preOutput(true, workspaceMgr); // Note: preOutput(INDArray) can't be used here, as it does a setInput(input) and resets the 'appliedDropout' flag
        INDArray delta = layerConf().getActivationFn().backprop(z, epsilon).getFirst(); // TODO handle activation function params

        if (maskArray != null) {
            applyMask(delta);
        }

        INDArray input = this.input.castTo(dataType);

        Gradient ret = new DefaultGradient();

        INDArray weightGrad = gradientViews.get(ElementWiseParamInitializer.WEIGHT_KEY);
        weightGrad.subi(weightGrad); // zero the gradient view before accumulating
        weightGrad.addi(input.mul(delta).sum(0));

        INDArray biasGrad = gradientViews.get(ElementWiseParamInitializer.BIAS_KEY);
        delta.sum(biasGrad, 0); // biasGrad is initialized/zeroed first

        ret.gradientForVariable().put(ElementWiseParamInitializer.WEIGHT_KEY, weightGrad);
        ret.gradientForVariable().put(ElementWiseParamInitializer.BIAS_KEY, biasGrad);

        // epsilonNext is a 2d matrix
        INDArray epsilonNext = delta.mulRowVector(params.get(ElementWiseParamInitializer.WEIGHT_KEY));
        epsilonNext = workspaceMgr.leverageTo(ArrayType.ACTIVATION_GRAD, epsilonNext);

        epsilonNext = backpropDropOutIfPresent(epsilonNext);
        return new Pair<>(ret, epsilonNext);
    }
Example 11
Source File: ReductionOpValidation.java From deeplearning4j with Apache License 2.0
    @Test
    public void testDotProductAttentionWithMask() {
        final INDArray keys = Nd4j.rand(new int[]{10, 4, 3});
        final INDArray values = Nd4j.rand(new int[]{10, 4, 3});
        final INDArray query = Nd4j.rand(new int[]{10, 4, 1});
        final INDArray mask = Nd4j.rand(10, 3).gte(0.2).castTo(DataType.DOUBLE);

        final INDArray exec = Nd4j.matmul(keys, query, true, false, false)
                .divi(Math.sqrt(keys.size(1)));
        exec.addi(mask.reshape(10, 3, 1).sub(1).muli(1e9));
        Nd4j.exec((CustomOp) new SoftMax(exec, exec, 1));
        final INDArray finalOut = Nd4j.matmul(values, exec).norm1();

        SameDiff sd = SameDiff.create();
        SDVariable sdQ = sd.var("q", query);
        SDVariable sdK = sd.var("k", keys);
        SDVariable sdV = sd.var("v", values);
        SDVariable sdMask = sd.constant("mask", mask);

        SDVariable t = sd.nn.dotProductAttention(sdQ, sdK, sdV, sdMask, true);
        t.norm1("out");

        String err = OpValidation.validate(new TestCase(sd)
                .expectedOutput("out", finalOut)
                .gradCheckSkipVariables("mask")
                .gradientCheck(true));
        assertNull(err);
    }
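The addi() line above applies a standard additive attention mask: entries of mask are 1 (keep) or 0 (drop), so mask.sub(1).muli(1e9) is 0 at kept positions and -1e9 at dropped ones; after the softmax, dropped positions receive essentially zero weight. The arithmetic in isolation:

    INDArray mask = Nd4j.create(new double[]{1, 0, 1}); // 1 = keep, 0 = drop
    INDArray bias = mask.sub(1).muli(1e9);              // [0.0, -1.0E9, 0.0]
    INDArray scores = Nd4j.create(new double[]{2.0, 5.0, 3.0}).addi(bias);
    // softmax(scores) now puts ~0 probability on the masked middle position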
Example 12
Source File: NormalizerStandardizeTest.java From nd4j with Apache License 2.0
    public genRandomDataSet(int nSamples, int nFeatures, int a, int b, long randSeed) {
        /* if a = 1 and b = 0: standard normal distribution; otherwise a distribution with some random mean and scale */
        int i = 0;
        // Randomly generate scaling constants aA and offsets bB
        INDArray aA = a == 1 ? Nd4j.ones(1, nFeatures) : Nd4j.rand(1, nFeatures, randSeed).mul(a); // a = 1: don't scale
        INDArray bB = Nd4j.rand(1, nFeatures, randSeed).mul(b); // b = 0 zeros this out
        // transform the ndarray as X = aA * X + bB
        INDArray randomFeatures = Nd4j.zeros(nSamples, nFeatures);
        INDArray randomFeaturesTransform = Nd4j.zeros(nSamples, nFeatures);
        while (i < nFeatures) {
            INDArray randomSlice = Nd4j.randn(nSamples, 1, randSeed);
            randomFeaturesTransform.putColumn(i, randomSlice);
            randomSlice.muli(aA.getScalar(0, i));
            randomSlice.addi(bB.getScalar(0, i));
            randomFeatures.putColumn(i, randomSlice);
            i++;
        }
        INDArray randomLabels = Nd4j.zeros(nSamples, 1);
        this.sampleDataSet = new DataSet(randomFeatures, randomLabels);
        this.theoreticalTransform = new DataSet(randomFeaturesTransform, randomLabels);
        this.theoreticalMean = bB;
        this.theoreticalStd = aA;
        this.theoreticalSEM = this.theoreticalStd.div(Math.sqrt(nSamples));
    }
Example 13
Source File: ShapeOpValidation.java From deeplearning4j with Apache License 2.0
    @Test
    public void testTileBp2() {
        Nd4j.getRandom().setSeed(12345);

        INDArray in = Nd4j.create(3, 4, 5); // values aren't used in backprop, just the shape
        int[] tile = new int[]{2, 3, 4};

        int[] outShape = new int[]{3 * 2, 4 * 3, 5 * 4};
        int length = ArrayUtil.prod(outShape);
        INDArray gradAtOut = Nd4j.rand(outShape);

        // the gradient w.r.t. the input is the sum of the output gradient over all tiled copies
        INDArray gradAtInExp = Nd4j.create(in.shape());
        for (int i = 0; i < tile[0]; i++) {
            for (int j = 0; j < tile[1]; j++) {
                for (int k = 0; k < tile[2]; k++) {
                    INDArray subset = gradAtOut.get(NDArrayIndex.interval(i * 3, (i + 1) * 3),
                            NDArrayIndex.interval(j * 4, (j + 1) * 4),
                            NDArrayIndex.interval(k * 5, (k + 1) * 5));
                    gradAtInExp.addi(subset);
                }
            }
        }

        DynamicCustomOp op = DynamicCustomOp.builder("tile_bp")
                .addInputs(in, gradAtOut)
                .addOutputs(Nd4j.create(in.shape())) // fresh output buffer, compared against gradAtInExp below
                .addIntegerArguments(tile)
                .build();

        OpTestCase otc = new OpTestCase(op)
                .expectedOutput(0, gradAtInExp);

        String err = OpValidation.validate(otc);
        assertNull(err);
    }
Example 14
Source File: OldConvolution.java From nd4j with Apache License 2.0
    /**
     * Rearrange matrix columns into blocks
     *
     * @param col the column-transposed image to convert
     * @param sy  stride y
     * @param sx  stride x
     * @param ph  padding height
     * @param pw  padding width
     * @param h   height
     * @param w   width
     * @return the image reassembled from the columns
     */
    public static INDArray col2im(INDArray col, int sy, int sx, int ph, int pw, int h, int w) {
        long n = col.size(0);    // number of images
        long c = col.size(1);    // number of channels
        long kh = col.size(2);   // kernel height
        long kw = col.size(3);   // kernel width
        long outH = col.size(4); // output height
        long outW = col.size(5); // output width

        INDArray img = Nd4j.create(n, c, h + 2 * ph + sy - 1, w + 2 * pw + sx - 1);
        for (int i = 0; i < kh; i++) { // iterate over the kernel rows
            long iLim = i + sy * outH;
            for (int j = 0; j < kw; j++) { // iterate over the kernel columns
                long jLim = j + sx * outW;
                INDArrayIndex[] indices = new INDArrayIndex[] {
                        NDArrayIndex.all(), NDArrayIndex.all(),
                        NDArrayIndex.interval(i, sy, iLim),
                        NDArrayIndex.interval(j, sx, jLim)};

                INDArray get = img.get(indices);
                INDArray colAdd = col.get(NDArrayIndex.all(), NDArrayIndex.all(), NDArrayIndex.point(i),
                        NDArrayIndex.point(j), NDArrayIndex.all(), NDArrayIndex.all());
                get.addi(colAdd);
                img.put(indices, get);
            }
        }

        // return the subset of the padded image relative to the height/width of the image and the padding width/height
        return img.get(NDArrayIndex.all(), NDArrayIndex.all(),
                NDArrayIndex.interval(ph, ph + h),
                NDArrayIndex.interval(pw, pw + w));
    }
Example 15
Source File: NDArrayTestsFortran.java From deeplearning4j with Apache License 2.0
    @Test
    public void testAddMatrix() {
        INDArray five = Nd4j.ones(5);
        five.addi(five.dup());
        INDArray twos = Nd4j.valueArrayOf(5, 2);
        assertEquals(getFailureMessage(), twos, five);
    }
Example 16
Source File: PLNetDyadRanker.java From AILibs with GNU Affero General Public License v3.0
    /**
     * Updates this {@link PLNetDyadRanker} based on a given mini batch of
     * {@link INDArray}s representing dyad rankings.
     *
     * @param minibatch A mini batch consisting of a {@link List} of {@link INDArray}s.
     */
    private void updateWithMinibatch(final List<INDArray> minibatch) {
        double actualMiniBatchSize = minibatch.size();
        INDArray cumulativeDeltaW = Nd4j.zeros(this.plNet.params().length());

        for (INDArray instance : minibatch) {
            cumulativeDeltaW.addi(this.computeScaledGradient(instance));
        }
        cumulativeDeltaW.muli(1 / actualMiniBatchSize);

        this.plNet.params().subi(cumulativeDeltaW);
        this.iteration++;
    }
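The loop above is plain minibatch gradient descent: per-instance gradients are summed with addi() and averaged before the parameter update, i.e. w <- w - (1/|B|) * sum of g(x) over the batch B. A compact sketch of the pattern, where computeGradient() is a hypothetical stand-in for any per-example gradient function:

    INDArray params = Nd4j.rand(1, 10);
    INDArray acc = Nd4j.zeros(1, 10);
    for (INDArray example : minibatch) {    // 'minibatch' as in the method above
        acc.addi(computeGradient(example)); // hypothetical per-example gradient
    }
    acc.muli(1.0 / minibatch.size());
    params.subi(acc);                       // in-place parameter update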
Example 17
Source File: CompositeReconstructionDistribution.java From deeplearning4j with Apache License 2.0
    @Override
    public INDArray exampleNegLogProbability(INDArray x, INDArray preOutDistributionParams) {
        int inputSoFar = 0;
        int paramsSoFar = 0;
        INDArray exampleLogProbSum = null;
        for (int i = 0; i < distributionSizes.length; i++) {
            int thisInputSize = distributionSizes[i];
            int thisParamsSize = reconstructionDistributions[i].distributionInputSize(thisInputSize);

            INDArray inputSubset =
                    x.get(NDArrayIndex.all(), NDArrayIndex.interval(inputSoFar, inputSoFar + thisInputSize));
            INDArray paramsSubset = preOutDistributionParams.get(NDArrayIndex.all(),
                    NDArrayIndex.interval(paramsSoFar, paramsSoFar + thisParamsSize));

            if (i == 0) {
                exampleLogProbSum = reconstructionDistributions[i].exampleNegLogProbability(inputSubset, paramsSubset);
            } else {
                exampleLogProbSum.addi(
                        reconstructionDistributions[i].exampleNegLogProbability(inputSubset, paramsSubset));
            }

            inputSoFar += thisInputSize;
            paramsSoFar += thisParamsSize;
        }

        return exampleLogProbSum;
    }
Example 18
Source File: NDArrayTestsFortran.java From deeplearning4j with Apache License 2.0
    @Test
    public void testSmallSum() {
        INDArray base = Nd4j.create(new double[] {5.843333333333335, 3.0540000000000007});
        base.addi(1e-12);
        INDArray assertion = Nd4j.create(new double[] {5.84333433, 3.054001});
        assertEquals(assertion, base);
    }
Example 19
Source File: NDArrayTestsFortran.java From deeplearning4j with Apache License 2.0
    @Test
    public void testScalarOps() {
        INDArray n = Nd4j.create(Nd4j.ones(27).data(), new long[] {3, 3, 3});
        assertEquals(27d, n.length(), 1e-1);
        n.addi(Nd4j.scalar(1d));
        n.subi(Nd4j.scalar(1.0d));
        n.muli(Nd4j.scalar(1.0d));
        n.divi(Nd4j.scalar(1.0d));

        n = Nd4j.create(Nd4j.ones(27).data(), new long[] {3, 3, 3});
        assertEquals(27, n.sumNumber().doubleValue(), 1e-1);
        INDArray a = n.slice(2);
        assertEquals(true, Arrays.equals(new long[] {3, 3}, a.shape()));
    }
Example 20
Source File: GravesBidirectionalLSTM.java From deeplearning4j with Apache License 2.0
    private INDArray activateOutput(final boolean training, boolean forBackprop, LayerWorkspaceMgr workspaceMgr) {
        assertInputSet(false);
        final FwdPassReturn forwardsEval;
        final FwdPassReturn backwardsEval;

        if (cacheMode != CacheMode.NONE && cachedPassForward != null && cachedPassBackward != null) {
            // restore from cache; this path will probably never be taken, though
            forwardsEval = cachedPassForward;
            backwardsEval = cachedPassBackward;

            cachedPassBackward = null;
            cachedPassForward = null;
        } else {
            forwardsEval = LSTMHelpers.activateHelper(this, this.conf, this.layerConf().getGateActivationFn(),
                    permuteIfNWC(this.input),
                    getParam(GravesBidirectionalLSTMParamInitializer.RECURRENT_WEIGHT_KEY_FORWARDS),
                    getParam(GravesBidirectionalLSTMParamInitializer.INPUT_WEIGHT_KEY_FORWARDS),
                    getParam(GravesBidirectionalLSTMParamInitializer.BIAS_KEY_FORWARDS), training, null, null,
                    forBackprop || (cacheMode != CacheMode.NONE && training), true,
                    GravesBidirectionalLSTMParamInitializer.INPUT_WEIGHT_KEY_FORWARDS, maskArray, true, null,
                    forBackprop ? cacheMode : CacheMode.NONE, workspaceMgr, layerConf().isHelperAllowFallback());

            backwardsEval = LSTMHelpers.activateHelper(this, this.conf, this.layerConf().getGateActivationFn(),
                    permuteIfNWC(this.input),
                    getParam(GravesBidirectionalLSTMParamInitializer.RECURRENT_WEIGHT_KEY_BACKWARDS),
                    getParam(GravesBidirectionalLSTMParamInitializer.INPUT_WEIGHT_KEY_BACKWARDS),
                    getParam(GravesBidirectionalLSTMParamInitializer.BIAS_KEY_BACKWARDS), training, null, null,
                    forBackprop || (cacheMode != CacheMode.NONE && training), false,
                    GravesBidirectionalLSTMParamInitializer.INPUT_WEIGHT_KEY_BACKWARDS, maskArray, true, null,
                    forBackprop ? cacheMode : CacheMode.NONE, workspaceMgr, layerConf().isHelperAllowFallback());

            forwardsEval.fwdPassOutput = permuteIfNWC(forwardsEval.fwdPassOutput);
            backwardsEval.fwdPassOutput = permuteIfNWC(backwardsEval.fwdPassOutput);
            cachedPassForward = forwardsEval;
            cachedPassBackward = backwardsEval;
        }

        // sum the forward and backward outputs
        final INDArray fwdOutput = forwardsEval.fwdPassOutput;
        final INDArray backOutput = backwardsEval.fwdPassOutput;

        // if we're on the ff pass & cache is enabled, we must not modify fwdOutput; on the backprop pass we don't care
        final INDArray totalOutput = training && cacheMode != CacheMode.NONE && !forBackprop
                ? fwdOutput.add(backOutput)
                : fwdOutput.addi(backOutput);

        return totalOutput;
    }