Java Code Examples for org.nd4j.linalg.indexing.BooleanIndexing#replaceWhere()
The following examples show how to use org.nd4j.linalg.indexing.BooleanIndexing#replaceWhere(). They are drawn from open-source projects; each example notes its source file, originating project, and license.
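Before the project examples, here is a minimal, self-contained sketch of the two overloads used throughout this page. This is a hedged illustration, not taken from any of the projects below; the class name and array values are invented for the demo. replaceWhere(array, scalar, condition) overwrites, in place, every element that satisfies the condition with the scalar; replaceWhere(array, replacement, condition) takes the new value element-wise from a second array of the same shape.

import org.nd4j.linalg.api.ndarray.INDArray;
import org.nd4j.linalg.factory.Nd4j;
import org.nd4j.linalg.indexing.BooleanIndexing;
import org.nd4j.linalg.indexing.conditions.Conditions;

public class ReplaceWhereDemo {
    public static void main(String[] args) {
        INDArray arr = Nd4j.create(new double[]{-2.0, -0.5, 0.0, 1.5});

        // Scalar overload: set every negative element to 0.0, in place
        BooleanIndexing.replaceWhere(arr, 0.0, Conditions.lessThan(0.0));
        System.out.println(arr);   // [0, 0, 0, 1.5]

        // Array overload: where the condition holds on arr, take the value from a second array
        INDArray fallback = Nd4j.create(new double[]{9.0, 9.0, 9.0, 9.0});
        BooleanIndexing.replaceWhere(arr, fallback, Conditions.equals(0.0));
        System.out.println(arr);   // [9, 9, 9, 1.5]
    }
}

Both overloads mutate their first argument, which is why several examples below call dup() first to preserve the original array.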
Example 1
Source File: BaseUnderSamplingPreProcessor.java, from deeplearning4j (Apache License 2.0)
private INDArray calculateBernoulli(INDArray minorityLabels, INDArray labelMask, double targetMinorityDist) {
    INDArray minorityClass = minorityLabels.castTo(Nd4j.defaultFloatingPointType()).muli(labelMask);
    INDArray majorityClass = minorityLabels.rsub(1.0).muli(labelMask); //rsub(1.0) is equivalent to swapping 0s and 1s

    //All minority class: keep the mask as is
    //Minority class present and donotMaskMinorityWindows set: return the label mask as is
    if (majorityClass.sumNumber().intValue() == 0
            || (minorityClass.sumNumber().intValue() > 0 && donotMaskMinorityWindows))
        return labelMask;

    //All majority class, and not masking all majority windows: sample the majority class with probability 1 - targetMinorityDist
    if (minorityClass.sumNumber().intValue() == 0 && !maskAllMajorityWindows)
        return labelMask.muli(1 - targetMinorityDist);

    //Probabilities to be used for Bernoulli sampling
    INDArray minorityMajorityRatio = minorityClass.sum(1).div(majorityClass.sum(1));
    INDArray majorityBernoulliP = minorityMajorityRatio.muli(1 - targetMinorityDist).divi(targetMinorityDist);
    BooleanIndexing.replaceWhere(majorityBernoulliP, 1.0, Conditions.greaterThan(1.0)); //if the minority ratio is already met, cap the probability at 1.0
    return majorityClass.muliColumnVector(majorityBernoulliP).addi(minorityClass);
}
Example 2
Source File: TransformOpValidation.java, from deeplearning4j (Apache License 2.0)
@Test
public void testReplaceWhereScalar() {
    for (Condition c : new Condition[]{Conditions.lessThan(0.5), Conditions.greaterThan(0.5), Conditions.equals(0.5)}) {
        log.info("Testing condition: " + c.getClass().getSimpleName());
        INDArray inArr = Nd4j.rand(DataType.DOUBLE, 3, 4);
        SameDiff sd = SameDiff.create();
        SDVariable in = sd.var("in", inArr);
        SDVariable where = sd.replaceWhere(in, 10, c);

        INDArray exp = inArr.dup();
        BooleanIndexing.replaceWhere(exp, 10, c);

        SDVariable loss = where.std(true);

        TestCase tc = new TestCase(sd);
        String err = OpValidation.validate(tc);
        assertNull(err);
    }
}
Example 3
Source File: ActivationRReLU.java, from nd4j (Apache License 2.0)
@Override
public INDArray getActivation(INDArray in, boolean training) {
    if (training) {
        try (MemoryWorkspace ws = Nd4j.getWorkspaceManager().scopeOutOfWorkspaces()) {
            this.alpha = Nd4j.rand(in.shape(), l, u, Nd4j.getRandom());
        }
        INDArray inTimesAlpha = in.mul(alpha);
        BooleanIndexing.replaceWhere(in, inTimesAlpha, Conditions.lessThan(0));
    } else {
        this.alpha = null;
        double a = 0.5 * (l + u);
        return Nd4j.getExecutioner().execAndReturn(new RectifedLinear(in, a));
    }

    return in;
}
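The array overload above implements a leaky/randomized ReLU in place: where in < 0, the element is taken from in * alpha. A minimal sketch of the same pattern with a fixed, illustrative alpha of 0.1 (not from the original source):

INDArray in = Nd4j.create(new double[]{-2.0, 3.0});
INDArray leaky = in.mul(0.1);   // candidate replacement values
BooleanIndexing.replaceWhere(in, leaky, Conditions.lessThan(0));
// in is now [-0.2, 3.0]: negatives replaced element-wise, positives untouched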
Example 4
Source File: LayerHelperValidationUtil.java, from deeplearning4j (Apache License 2.0)
private static INDArray relError(@NonNull INDArray a1, @NonNull INDArray a2, double minAbsError) {
    long numNaN1 = Nd4j.getExecutioner().exec(new MatchCondition(a1, Conditions.isNan(), Integer.MAX_VALUE)).getInt(0);
    long numNaN2 = Nd4j.getExecutioner().exec(new MatchCondition(a2, Conditions.isNan(), Integer.MAX_VALUE)).getInt(0);
    Preconditions.checkState(numNaN1 == 0, "Array 1 has NaNs");
    Preconditions.checkState(numNaN2 == 0, "Array 2 has NaNs");

    INDArray abs1 = Transforms.abs(a1, true);
    INDArray abs2 = Transforms.abs(a2, true);
    INDArray absDiff = Transforms.abs(a1.sub(a2), false);

    //Mask: abs(a1-a2) >= minAbsError ? 1 : 0
    INDArray greaterThanMinAbs = Transforms.abs(a1.sub(a2), false);
    BooleanIndexing.replaceWhere(greaterThanMinAbs, 0.0, Conditions.lessThan(minAbsError));
    BooleanIndexing.replaceWhere(greaterThanMinAbs, 1.0, Conditions.greaterThan(0.0));

    INDArray result = absDiff.divi(abs1.add(abs2));
    //The only way to get NaNs here, given there were none in the originals: both entries are 0
    BooleanIndexing.replaceWhere(result, 0.0, Conditions.isNan());
    //Finally: set to 0 if the difference is below the min abs error, unchanged otherwise
    result.muli(greaterThanMinAbs);
    return result;
}
Example 5
Source File: RandomProjectionLSH.java, from deeplearning4j (Apache License 2.0)
@Override
public INDArray bucket(INDArray query) {
    INDArray queryRes = rawBucketOf(query);

    if (numTables > 1) {
        INDArray entropyQueries = entropy(query);

        // loop of addi + conditional replace -> a poor man's OR function
        for (int i = 0; i < numTables; i++) {
            INDArray row = entropyQueries.getRow(i, true);
            queryRes.addi(rawBucketOf(row));
        }
        BooleanIndexing.replaceWhere(queryRes, 1.0, Conditions.greaterThan(0.0));
    }

    return queryRes;
}
Example 6
Source File: LossMCXENT.java, from deeplearning4j (Apache License 2.0)
protected INDArray scoreArray(INDArray labels, INDArray preOutput, IActivation activationFn, INDArray mask) {
    if (!labels.equalShapes(preOutput)) {
        Preconditions.throwEx("Labels and preOutput must have equal shapes: got shapes %s vs %s", labels.shape(), preOutput.shape());
    }
    labels = labels.castTo(preOutput.dataType()); //No-op if already correct dtype
    INDArray output = activationFn.getActivation(preOutput.dup(), true);
    if (activationFn instanceof ActivationSoftmax && softmaxClipEps > 0.0) {
        BooleanIndexing.replaceWhere(output, softmaxClipEps, Conditions.lessThan(softmaxClipEps));
        BooleanIndexing.replaceWhere(output, 1.0 - softmaxClipEps, Conditions.greaterThan(1.0 - softmaxClipEps));
    }
    INDArray scoreArr = Transforms.log(output, false).muli(labels);

    //Weighted loss function
    if (weights != null) {
        if (weights.length() != scoreArr.size(1)) {
            throw new IllegalStateException("Weights vector (length " + weights.length()
                    + ") does not match output.size(1)=" + preOutput.size(1));
        }
        scoreArr.muliRowVector(weights.castTo(scoreArr.dataType()));
    }

    if (mask != null) {
        LossUtil.applyMask(scoreArr, mask);
    }
    return scoreArr;
}
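The two replaceWhere calls above clip the softmax output into [eps, 1 - eps] so that the subsequent log never sees an exact 0 or 1. The same idiom in isolation (a sketch with illustrative values; the real epsilon comes from softmaxClipEps):

INDArray p = Nd4j.create(new double[]{0.0, 0.3, 1.0});
double eps = 1e-10;   // illustrative
BooleanIndexing.replaceWhere(p, eps, Conditions.lessThan(eps));                  // floor at eps
BooleanIndexing.replaceWhere(p, 1.0 - eps, Conditions.greaterThan(1.0 - eps));   // ceiling at 1 - eps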
Example 7
Source File: StandardizeStrategy.java, from deeplearning4j (Apache License 2.0)
private static INDArray filteredStd(DistributionStats stats) {
    /* To avoid division by zero when the standard deviation is zero, replace zeros with ones */
    INDArray stdCopy = stats.getStd();
    BooleanIndexing.replaceWhere(stdCopy, 1.0, Conditions.equals(0));
    return stdCopy;
}
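A short sketch of why this guard works (illustrative values): a column with zero standard deviation also has zero centered values, so dividing by the substituted 1.0 simply leaves it at 0 instead of producing NaN.

INDArray std = Nd4j.create(new double[]{0.5, 0.0, 2.0});
BooleanIndexing.replaceWhere(std, 1.0, Conditions.equals(0));
INDArray centered = Nd4j.create(new double[]{1.0, 0.0, 4.0});
INDArray normalized = centered.div(std);   // [2.0, 0.0, 2.0] - no NaN from 0/0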
Example 8
Source File: CuDNNValidationUtil.java, from deeplearning4j (Apache License 2.0)
private static INDArray relError(@NonNull INDArray a1, @NonNull INDArray a2, double minAbsError) {
    long numNaN1 = Nd4j.getExecutioner().exec(new MatchCondition(a1, Conditions.isNan(), Integer.MAX_VALUE)).getInt(0);
    long numNaN2 = Nd4j.getExecutioner().exec(new MatchCondition(a2, Conditions.isNan(), Integer.MAX_VALUE)).getInt(0);
    Preconditions.checkState(numNaN1 == 0, "Array 1 has NaNs");
    Preconditions.checkState(numNaN2 == 0, "Array 2 has NaNs");

    INDArray abs1 = Transforms.abs(a1, true);
    INDArray abs2 = Transforms.abs(a2, true);
    INDArray absDiff = Transforms.abs(a1.sub(a2), false);

    //Mask: abs(a1-a2) >= minAbsError ? 1 : 0
    INDArray greaterThanMinAbs = Transforms.abs(a1.sub(a2), false);
    BooleanIndexing.replaceWhere(greaterThanMinAbs, 0.0, Conditions.lessThan(minAbsError));
    BooleanIndexing.replaceWhere(greaterThanMinAbs, 1.0, Conditions.greaterThan(0.0));

    INDArray result = absDiff.divi(abs1.add(abs2));
    //The only way to get NaNs here, given there were none in the originals: both entries are 0
    BooleanIndexing.replaceWhere(result, 0.0, Conditions.isNan());
    //Finally: set to 0 if the difference is below the min abs error, unchanged otherwise
    result.muli(greaterThanMinAbs);
    return result;
}
Example 9
Source File: LossHinge.java, from nd4j (Apache License 2.0)
@Override
public INDArray computeGradient(INDArray labels, INDArray preOutput, IActivation activationFn, INDArray mask) {
    if (labels.size(1) != preOutput.size(1)) {
        throw new IllegalArgumentException(
                "Labels array numColumns (size(1) = " + labels.size(1) + ") does not match output layer"
                        + " number of outputs (nOut = " + preOutput.size(1) + ") ");
    }
    /*
     The gradient is 0 if y*yhat >= 1; otherwise the gradient of the loss
     (1 - y*yhat) wrt preOutput is -y * (derivative of yhat wrt preOutput).
    */
    INDArray bitMaskRowCol = scoreArray(labels, preOutput, activationFn, mask);
    /*
     Bit mask: 0 where 1 - y*yhat is negative, 1 where it is positive.
    */
    BooleanIndexing.replaceWhere(bitMaskRowCol, 0.0, Conditions.lessThan(0.0));
    BooleanIndexing.replaceWhere(bitMaskRowCol, 1.0, Conditions.greaterThan(0.0));

    INDArray dLda = labels.neg().muli(bitMaskRowCol);

    if (mask != null && LossUtil.isPerOutputMasking(dLda, mask)) {
        //For *most* activation functions, we don't actually need to mask dL/da in addition to masking dL/dz later.
        //But some, like softmax, require both (due to dL/dz_i being a function of dL/da_j, for i != j).
        //We could add a special case for softmax (activationFn instanceof ActivationSoftmax), but that would be
        // error prone - though it would buy us a tiny bit of performance.
        LossUtil.applyMask(dLda, mask);
    }

    INDArray gradients = activationFn.backprop(preOutput, dLda).getFirst(); //TODO activation functions with parameters

    if (mask != null) {
        LossUtil.applyMask(gradients, mask);
    }

    return gradients;
}
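The pair of replaceWhere calls above is a recurring idiom on this page (see also Examples 4, 8, and 15): it binarizes an array by sign, producing a 0/1 mask. In isolation (a sketch with illustrative values):

INDArray m = Nd4j.create(new double[]{-0.7, 0.2, 1.5});
BooleanIndexing.replaceWhere(m, 0.0, Conditions.lessThan(0.0));    // negatives -> 0
BooleanIndexing.replaceWhere(m, 1.0, Conditions.greaterThan(0.0)); // positives -> 1
// m is now [0.0, 1.0, 1.0]

Note the order: clearing negatives first means the second call only sees the surviving positive values, and exact zeros stay 0.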
Example 10
Source File: ActivationRReLU.java, from deeplearning4j (Apache License 2.0)
@Override
public Pair<INDArray, INDArray> backprop(INDArray in, INDArray epsilon) {
    assertShape(in, epsilon);
    INDArray dLdz = Nd4j.ones(in.shape());
    BooleanIndexing.replaceWhere(dLdz, alpha, Conditions.lessThanOrEqual(0.0));
    dLdz.muli(epsilon);
    return new Pair<>(dLdz, null);
}
Example 11
Source File: LossMCXENT.java, from nd4j (Apache License 2.0)
private INDArray scoreArray(INDArray labels, INDArray preOutput, IActivation activationFn, INDArray mask) {
    if (labels.size(1) != preOutput.size(1)) {
        throw new IllegalArgumentException(
                "Labels array numColumns (size(1) = " + labels.size(1) + ") does not match output layer"
                        + " number of outputs (nOut = " + preOutput.size(1) + ") ");
    }
    INDArray output = activationFn.getActivation(preOutput.dup(), true);
    if (activationFn instanceof ActivationSoftmax && softmaxClipEps > 0.0) {
        BooleanIndexing.replaceWhere(output, softmaxClipEps, Conditions.lessThan(softmaxClipEps));
        BooleanIndexing.replaceWhere(output, 1.0 - softmaxClipEps, Conditions.greaterThan(1.0 - softmaxClipEps));
    }
    INDArray scoreArr = Transforms.log(output, false).muli(labels);

    //Weighted loss function
    if (weights != null) {
        if (weights.length() != scoreArr.size(1)) {
            throw new IllegalStateException("Weights vector (length " + weights.length()
                    + ") does not match output.size(1)=" + preOutput.size(1));
        }
        scoreArr.muliRowVector(weights);
    }

    if (mask != null) {
        LossUtil.applyMask(scoreArr, mask);
    }
    return scoreArr;
}
Example 12
Source File: ActivationELU.java, from nd4j (Apache License 2.0)
@Override
public INDArray getActivation(INDArray in, boolean training) {
    // the native ELU op has no support for overriding alpha
    if (this.alpha != 1.00) {
        INDArray alphaMultiple = Nd4j.getExecutioner().execAndReturn(new ELU(in.dup()));
        alphaMultiple.muli(alpha);
        BooleanIndexing.replaceWhere(in, alphaMultiple, Conditions.lessThan(0));
    } else {
        Nd4j.getExecutioner().execAndReturn(new ELU(in));
    }
    return in;
}
Example 13
Source File: MaxNormConstraint.java, from deeplearning4j (Apache License 2.0)
@Override
public void apply(INDArray param) {
    INDArray norm = param.norm2(dimensions);
    INDArray clipped = norm.unsafeDuplication();
    BooleanIndexing.replaceWhere(clipped, maxNorm, Conditions.greaterThan(maxNorm));
    norm.addi(epsilon);
    clipped.divi(norm);
    Broadcast.mul(param, clipped, param, getBroadcastDims(dimensions, param.rank()));
}
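Here replaceWhere caps each norm at maxNorm, so the resulting scale factor clipped/norm is 1 for units already within the limit and maxNorm/norm < 1 for those above it. A minimal sketch (illustrative values, with the epsilon term omitted for clarity):

double maxNorm = 1.0;
INDArray norm = Nd4j.create(new double[]{0.5, 3.0});
INDArray clipped = norm.dup();
BooleanIndexing.replaceWhere(clipped, maxNorm, Conditions.greaterThan(maxNorm));
INDArray scale = clipped.div(norm);   // [1.0, 0.333...]: only the oversized unit is rescaled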
Example 14
Source File: StandardizeStrategy.java, from nd4j (Apache License 2.0)
private static INDArray filteredStd(DistributionStats stats) {
    /* To avoid division by zero when the standard deviation is zero, replace zeros with ones */
    INDArray stdCopy = stats.getStd();
    BooleanIndexing.replaceWhere(stdCopy, 1.0, Conditions.equals(0));
    return stdCopy;
}
Example 15
Source File: LossSquaredHinge.java, from deeplearning4j (Apache License 2.0)
@Override
public INDArray computeGradient(INDArray labels, INDArray preOutput, IActivation activationFn, INDArray mask) {
    if (!labels.equalShapes(preOutput)) {
        Preconditions.throwEx("Labels and preOutput must have equal shapes: got shapes %s vs %s", labels.shape(), preOutput.shape());
    }
    labels = labels.castTo(preOutput.dataType()); //No-op if already correct dtype
    INDArray scoreArr = scoreArray(labels, preOutput, activationFn, mask);

    INDArray bitMaskRowCol = scoreArr.dup();
    /*
     Bit mask: 0 where 1 - y*yhat is negative, 1 where it is positive.
    */
    BooleanIndexing.replaceWhere(bitMaskRowCol, 0.0, Conditions.lessThan(0.0));
    BooleanIndexing.replaceWhere(bitMaskRowCol, 1.0, Conditions.greaterThan(0.0));

    INDArray dLda = scoreArr.muli(2).muli(labels.neg());
    dLda.muli(bitMaskRowCol);

    if (mask != null && LossUtil.isPerOutputMasking(dLda, mask)) {
        //For *most* activation functions, we don't actually need to mask dL/da in addition to masking dL/dz later.
        //But some, like softmax, require both (due to dL/dz_i being a function of dL/da_j, for i != j).
        //We could add a special case for softmax (activationFn instanceof ActivationSoftmax), but that would be
        // error prone - though it would buy us a tiny bit of performance.
        LossUtil.applyMask(dLda, mask);
    }

    INDArray gradients = activationFn.backprop(preOutput, dLda).getFirst(); //TODO activation functions with params

    if (mask != null) {
        LossUtil.applyMask(gradients, mask);
    }

    return gradients;
}
Example 16
Source File: MaskedReductionUtil.java, from deeplearning4j (Apache License 2.0)
public static INDArray maskedPoolingEpsilonCnn(PoolingType poolingType, INDArray input, INDArray mask,
                                               INDArray epsilon2d, int pnorm, DataType dataType) {
    // [minibatch, channels, h=1, w=X] or [minibatch, channels, h=X, w=1] data
    // with a mask array of shape [minibatch, X]

    //If masking along height: broadcast dimensions are [0,2]
    //If masking along width: broadcast dimensions are [0,3]

    mask = mask.castTo(dataType);   //No-op if already correct type

    //General case: must be equal or 1 on each dimension
    int[] dimensions = new int[4];
    int count = 0;
    for (int i = 0; i < 4; i++) {
        if (input.size(i) == mask.size(i)) {
            dimensions[count++] = i;
        }
    }
    if (count < 4) {
        dimensions = Arrays.copyOfRange(dimensions, 0, count);
    }

    switch (poolingType) {
        case MAX:
            //TODO This is ugly - replace it with something better... Need something like a broadcast CAS op
            INDArray negInfMask;
            if (mask.dataType() == DataType.BOOL) {
                negInfMask = Transforms.not(mask).castTo(dataType);
            } else {
                negInfMask = mask.rsub(1.0);
            }
            BooleanIndexing.replaceWhere(negInfMask, Double.NEGATIVE_INFINITY, Conditions.equals(1.0));

            INDArray withInf = Nd4j.createUninitialized(dataType, input.shape());
            Nd4j.getExecutioner().exec(new BroadcastAddOp(input, negInfMask, withInf, dimensions));
            //At this point: all the masked-out steps have value -inf, hence can't be the output of the MAX op

            INDArray isMax = Nd4j.exec(new IsMax(withInf, withInf.ulike(), 2, 3))[0];

            return Nd4j.getExecutioner().exec(new BroadcastMulOp(isMax, epsilon2d, isMax, 0, 1));
        case AVG:
        case SUM:
            //if out = sum(in, dims) then dL/dIn = dL/dOut -> duplicate to each step and mask
            //if out = avg(in, dims) then dL/dIn = 1/N * dL/dOut
            //With masking: N differs for different time series

            INDArray out = Nd4j.createUninitialized(dataType, input.shape(), 'f');

            //Broadcast copy op, then divide and mask to 0 as appropriate
            Nd4j.getExecutioner().exec(new BroadcastCopyOp(out, epsilon2d, out, 0, 1));
            Nd4j.getExecutioner().exec(new BroadcastMulOp(out, mask, out, dimensions));

            if (poolingType == PoolingType.SUM) {
                return out;
            }

            //Note that with CNNs, the current design is restricted to [minibatch, channels, 1, W] or [minibatch, channels, H, 1]
            INDArray nEachTimeSeries = mask.sum(1, 2, 3);   //[minibatchSize, tsLength] -> [minibatchSize, 1]
            Nd4j.getExecutioner().exec(new BroadcastDivOp(out, nEachTimeSeries, out, 0));

            return out;
        case PNORM:
            //Similar to average and sum pooling: there's no N term here, so we can just set the masked values to 0
            INDArray masked2 = Nd4j.createUninitialized(dataType, input.shape());
            Nd4j.getExecutioner().exec(new BroadcastMulOp(input, mask, masked2, dimensions));

            INDArray abs = Transforms.abs(masked2, true);
            Transforms.pow(abs, pnorm, false);
            INDArray pNorm = Transforms.pow(abs.sum(2, 3), 1.0 / pnorm);

            INDArray numerator;
            if (pnorm == 2) {
                numerator = input.dup();
            } else {
                INDArray absp2 = Transforms.pow(Transforms.abs(input, true), pnorm - 2, false);
                numerator = input.mul(absp2);
            }

            INDArray denom = Transforms.pow(pNorm, pnorm - 1, false);
            denom.rdivi(epsilon2d);
            Nd4j.getExecutioner().execAndReturn(new BroadcastMulOp(numerator, denom, numerator, 0, 1));
            Nd4j.getExecutioner().exec(new BroadcastMulOp(numerator, mask, numerator, dimensions));  //Apply mask

            return numerator;
        default:
            throw new UnsupportedOperationException("Unknown or not supported pooling type: " + poolingType);
    }
}
Example 17
Source File: LossHinge.java, from deeplearning4j (Apache License 2.0)
@Override
public INDArray computeScoreArray(INDArray labels, INDArray preOutput, IActivation activationFn, INDArray mask) {
    INDArray scoreArr = scoreArray(labels, preOutput, activationFn, mask);
    BooleanIndexing.replaceWhere(scoreArr, 0.0, Conditions.lessThan(0.0)); //max(0, 1 - y*yhat)
    return scoreArr.sum(true, 1);
}
Example 18
Source File: TestRandomProjection.java, from deeplearning4j (Apache License 2.0)
private void makeRandomSparseData(int[] shape, double density) {
    INDArray z1 = Nd4j.rand(shape);
    // with mean = 0 and stdev = 1, the fraction of values with abs(value) < density is roughly density
    BooleanIndexing.replaceWhere(z1, 0.0, Conditions.absLessThan(density));
}
Example 19
Source File: MaskedReductionUtil.java, from deeplearning4j (Apache License 2.0)
public static INDArray maskedPoolingTimeSeries(PoolingType poolingType, INDArray toReduce, INDArray mask,
                                               int pnorm, DataType dataType) {
    if (toReduce.rank() != 3) {
        throw new IllegalArgumentException("Expect rank 3 array: got " + toReduce.rank());
    }
    if (mask.rank() != 2) {
        throw new IllegalArgumentException("Expect rank 2 array for mask: got " + mask.rank());
    }

    toReduce = toReduce.castTo(dataType);
    mask = mask.castTo(dataType);

    //Sum pooling: easy. Multiply by mask, then sum as normal
    //Average pooling: as above, but do a broadcast element-wise divi by mask.sum(1)
    //Max pooling: set to -inf if mask is 0, then do max as normal

    switch (poolingType) {
        case MAX:
            INDArray negInfMask = mask.castTo(dataType).rsub(1.0);
            BooleanIndexing.replaceWhere(negInfMask, Double.NEGATIVE_INFINITY, Conditions.equals(1.0));

            INDArray withInf = Nd4j.createUninitialized(dataType, toReduce.shape());
            Nd4j.getExecutioner().exec(new BroadcastAddOp(toReduce, negInfMask, withInf, 0, 2));
            //At this point: all the masked-out steps have value -inf, hence can't be the output of the MAX op

            return withInf.max(2);
        case AVG:
        case SUM:
            INDArray masked = Nd4j.createUninitialized(dataType, toReduce.shape());
            Nd4j.getExecutioner().exec(new BroadcastMulOp(toReduce, mask, masked, 0, 2));
            INDArray summed = masked.sum(2);
            if (poolingType == PoolingType.SUM) {
                return summed;
            }
            INDArray maskCounts = mask.sum(1);
            summed.diviColumnVector(maskCounts);
            return summed;
        case PNORM:
            //Similar to average and sum pooling: there's no N term here, so we can just set the masked values to 0
            INDArray masked2 = Nd4j.createUninitialized(dataType, toReduce.shape());
            Nd4j.getExecutioner().exec(new BroadcastMulOp(toReduce, mask, masked2, 0, 2));

            INDArray abs = Transforms.abs(masked2, true);
            Transforms.pow(abs, pnorm, false);
            INDArray pNorm = abs.sum(2);

            return Transforms.pow(pNorm, 1.0 / pnorm);
        default:
            throw new UnsupportedOperationException("Unknown or not supported pooling type: " + poolingType);
    }
}
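Examples 16 and 19 both turn a 0/1 keep-mask into an additive minus-infinity mask before a max reduction, so masked entries can never win the max. The trick in isolation (a sketch with illustrative values):

INDArray keep = Nd4j.create(new double[]{1, 1, 0});   // 1 = keep, 0 = mask out
INDArray negInf = keep.rsub(1.0);                     // masked positions become 1
BooleanIndexing.replaceWhere(negInf, Double.NEGATIVE_INFINITY, Conditions.equals(1.0));
INDArray vals = Nd4j.create(new double[]{2, 5, 9}).addi(negInf);
// vals is now [2, 5, -Infinity]; vals.maxNumber() is 5 - the masked 9 cannot win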
Example 20
Source File: CrashTest.java, from deeplearning4j (Apache License 2.0)
protected void op(INDArray x, INDArray y, int i) {
    // broadcast along row & column
    INDArray row = Nd4j.ones(64);
    INDArray column = Nd4j.ones(1024, 1);

    x.addiRowVector(row);
    x.addiColumnVector(column);

    // regular scalar op
    x.addi(i * 2);

    // reduction along all dimensions
    float sum = x.sumNumber().floatValue();

    // index reduction
    Nd4j.getExecutioner().exec(new ArgMax(x));

    // regular transform
    Nd4j.getExecutioner().exec(new Sqrt(x, x));

    // dup
    INDArray x1 = x.dup(x.ordering());
    INDArray x2 = x.dup(x.ordering());
    INDArray x3 = x.dup('c');
    INDArray x4 = x.dup('f');

    // vstack & hstack
    INDArray vstack = Nd4j.vstack(x, x1, x2, x3, x4);
    INDArray hstack = Nd4j.hstack(x, x1, x2, x3, x4);

    // reduce3 call
    Nd4j.getExecutioner().exec(new ManhattanDistance(x, x2));

    // flatten call
    INDArray flat = Nd4j.toFlattened(x, x1, x2, x3, x4);

    // reduction along dimension: row & column
    INDArray max_0 = x.max(0);
    INDArray max_1 = x.max(1);

    // index reduction along dimension: row & column
    INDArray imax_0 = Nd4j.argMax(x, 0);
    INDArray imax_1 = Nd4j.argMax(x, 1);

    // softmax & log softmax
    Nd4j.getExecutioner().exec((CustomOp) new SoftMax(x));
    Nd4j.getExecutioner().exec((CustomOp) new LogSoftMax(x));

    // BooleanIndexing
    BooleanIndexing.replaceWhere(x, 5f, Conditions.lessThan(8f));

    // assign on view
    BooleanIndexing.assignIf(x, x1, Conditions.greaterThan(-1000000000f));

    // std var along all dimensions
    float std = x.stdNumber().floatValue();

    // std var along row & col
    INDArray xStd_0 = x.std(0);
    INDArray xStd_1 = x.std(1);

    // BLAS call
    float dot = (float) Nd4j.getBlasWrapper().dot(x, x1);

    // mmul
    for (boolean tA : paramsA) {
        for (boolean tB : paramsB) {
            INDArray xT = tA ? x.dup() : x.dup().transpose();
            INDArray yT = tB ? y.dup() : y.dup().transpose();

            Nd4j.gemm(xT, yT, tA, tB);
        }
    }

    // specially for views, checking here without dup and rollover
    Nd4j.gemm(x, y, false, false);

    log.debug("Iteration passed: " + i);
}