Java Code Examples for org.nd4j.linalg.factory.Nd4j#zeros()
The following examples show how to use org.nd4j.linalg.factory.Nd4j#zeros().
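Nd4j.zeros() allocates a new INDArray of the given shape, filled with zeros. Before diving into the full examples, here is a minimal sketch of the overloads that appear below; the demo class name is ours, but each call form is taken directly from the examples:

import org.nd4j.linalg.api.buffer.DataType;
import org.nd4j.linalg.api.ndarray.INDArray;
import org.nd4j.linalg.factory.Nd4j;

public class ZerosDemo {
    public static void main(String[] args) {
        INDArray matrix = Nd4j.zeros(3, 3);                // 3x3 matrix, default data type
        INDArray vector = Nd4j.zeros(100);                 // length-100 vector
        INDArray tensor = Nd4j.zeros(10, 10, 20);          // rank-3 tensor
        INDArray mask   = Nd4j.zeros(DataType.BOOL, 2, 5); // explicit data type
        System.out.println(matrix.shapeInfoToString());
    }
}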
Example 1
Source File: SameDiffTests.java From nd4j with Apache License 2.0
@Test
public void testScatterMul() {
    INDArray arr1 = Nd4j.ones(3, 3);
    INDArray arr2 = Nd4j.create(new float[]{0, 1}, new long[]{2});
    INDArray arr3 = Nd4j.zeros(3, 3);
    INDArray expected = Nd4j.create(new float[]{0, 0, 0, 0, 0, 0, 1, 1, 1}, new long[]{3, 3});

    SameDiff sd = SameDiff.create();
    SDVariable refs = sd.var("refs", arr1);
    SDVariable idxs = sd.var("idxs", arr2);
    SDVariable upds = sd.var("upds", arr3);

    SDVariable result = sd.scatterMul(refs, idxs, upds);
    assertArrayEquals(new long[]{3, 3}, result.eval().shape());
    assertEquals(expected, result.eval());
}
Example 2
Source File: CachingDataSetIteratorTest.java From deeplearning4j with Apache License 2.0
private void runDataSetTest(DataSetCache cache) {
    int rows = 500;
    int inputColumns = 100;
    int outputColumns = 2;
    DataSet dataSet = new DataSet(Nd4j.ones(rows, inputColumns), Nd4j.zeros(rows, outputColumns));

    int batchSize = 10;
    int totalNumberOfSamples = 50;
    int expectedNumberOfDataSets = totalNumberOfSamples / batchSize;

    DataSetIterator it = new SamplingDataSetIterator(dataSet, batchSize, totalNumberOfSamples);

    String namespace = "test-namespace";

    CachingDataSetIterator cachedIt = new CachingDataSetIterator(it, cache, namespace);
    PreProcessor preProcessor = new PreProcessor();
    cachedIt.setPreProcessor(preProcessor);

    assertDataSetCacheGetsCompleted(cache, namespace, cachedIt);
    assertPreProcessingGetsCached(expectedNumberOfDataSets, it, cachedIt, preProcessor);
    assertCachingDataSetIteratorHasAllTheData(rows, inputColumns, outputColumns, dataSet, it, cachedIt);
}
Example 3
Source File: DerivativeTests.java From deeplearning4j with Apache License 2.0
@Test
public void testTanhDerivative() {
    //Derivative of tanh: d(tanh(x))/dx = 1 - tanh^2(x)
    INDArray z = Nd4j.zeros(100);
    double[] expOut = new double[100];
    for (int i = 0; i < 100; i++) {
        double x = 0.1 * (i - 50);
        z.putScalar(i, x);
        double tanh = FastMath.tanh(x);
        expOut[i] = 1.0 - tanh * tanh;
    }

    INDArray zPrime = Nd4j.getExecutioner().exec(new TanhDerivative(z));

    for (int i = 0; i < 100; i++) {
        double relError = Math.abs(expOut[i] - zPrime.getDouble(i))
                / (Math.abs(expOut[i]) + Math.abs(zPrime.getDouble(i)));
        assertTrue(relError < REL_ERROR_TOLERANCE);
    }
}
Example 4
Source File: RandomProjectionLSHTest.java From deeplearning4j with Apache License 2.0
@Test
public void testSearchReflexive() {
    rpLSH.makeIndex(inputs);

    int idx = (new Random(12345)).nextInt(100);

    INDArray row = inputs.getRow(idx, true);
    INDArray searchResults = rpLSH.search(row, 10.0f);

    INDArray res = Nd4j.zeros(DataType.BOOL, searchResults.shape());
    Nd4j.getExecutioner().exec(new BroadcastEqualTo(searchResults, row, res, -1));
    res = res.castTo(DataType.FLOAT);

    assertEquals(
            String.format("Expected one search result to be the query %s, but found %s", row, searchResults),
            1.0f, res.min(-1).maxNumber().floatValue(), 1e-3f);
}
Example 5
Source File: NormalizerStandardizeLabelsTest.java From nd4j with Apache License 2.0
public genRandomDataSet(int nSamples, int nFeatures, int a, int b, long randSeed) {
    /* If a == 1 and b == 0 this is a standard normal distribution;
       otherwise the data has a random mean and a random standard deviation. */
    int i = 0;
    // Randomly generate scaling constants and offsets aA and bB
    INDArray aA = a == 1 ? Nd4j.ones(1, nFeatures) : Nd4j.rand(1, nFeatures, randSeed).mul(a); // a == 1: don't scale
    INDArray bB = Nd4j.rand(1, nFeatures, randSeed).mul(b); // b == 0 zeros out the offset
    // Transform each column as X = aA * X + bB
    INDArray randomFeatures = Nd4j.zeros(nSamples, nFeatures);
    while (i < nFeatures) {
        INDArray randomSlice = Nd4j.randn(nSamples, 1, randSeed);
        randomSlice.muli(aA.getScalar(0, i));
        randomSlice.addi(bB.getScalar(0, i));
        randomFeatures.putColumn(i, randomSlice);
        i++;
    }
    INDArray randomLabels = randomFeatures.dup();
    this.sampleDataSet = new DataSet(randomFeatures, randomLabels);
    this.theoreticalMean = bB.dup();
    this.theoreticalStd = aA.dup();
    this.theoreticalSEM = this.theoreticalStd.div(Math.sqrt(nSamples));
}
Example 6
Source File: ShufflesTests.java From deeplearning4j with Apache License 2.0
@Test
public void testSymmetricShuffle2() {
    INDArray features = Nd4j.zeros(10, 10, 20);
    INDArray labels = Nd4j.zeros(10, 10, 3);

    for (int x = 0; x < 10; x++) {
        features.slice(x).assign(x);
        labels.slice(x).assign(x);
    }
    // System.out.println(features);

    OrderScanner3D scannerFeatures = new OrderScanner3D(features);
    OrderScanner3D scannerLabels = new OrderScanner3D(labels);

    List<INDArray> list = new ArrayList<>();
    list.add(features);
    list.add(labels);

    Nd4j.shuffle(list, 1, 2);

    // System.out.println(features);
    // System.out.println("------------------");
    // System.out.println(labels);

    assertTrue(scannerFeatures.compareSlice(features));
    assertTrue(scannerLabels.compareSlice(labels));

    for (int x = 0; x < 10; x++) {
        double val = features.slice(x).getDouble(0);
        INDArray row = labels.slice(x);

        for (int y = 0; y < row.length(); y++) {
            assertEquals(val, row.getDouble(y), 0.001);
        }
    }
}
Example 7
Source File: Utils.java From wekaDeeplearning4j with GNU General Public License v3.0
/**
 * Converts a set of training instances to a DataSet. Assumes that the instances have been
 * suitably preprocessed - i.e. missing values replaced and nominals converted to binary/numeric.
 * Also assumes that the class index has been set.
 *
 * @param insts the instances to convert
 * @return a DataSet
 */
public static DataSet instancesToDataSet(Instances insts) {
    INDArray data = Nd4j.zeros(insts.numInstances(), insts.numAttributes() - 1);
    INDArray outcomes = Nd4j.zeros(insts.numInstances(), insts.numClasses());

    for (int i = 0; i < insts.numInstances(); i++) {
        double[] independent = new double[insts.numAttributes() - 1];
        double[] dependent = new double[insts.numClasses()];
        Instance current = insts.instance(i);
        for (int j = 0; j < current.numValues(); j++) {
            int index = current.index(j);
            double value = current.valueSparse(j);

            if (index < insts.classIndex()) {
                independent[index] = value;
            } else if (index > insts.classIndex()) {
                // Shift by -1, since the class is left out from the feature matrix
                // and put into a separate outcomes matrix
                independent[index - 1] = value;
            }
        }

        // Set class values
        if (insts.numClasses() > 1) {
            // Classification
            final int oneHotIdx = (int) current.classValue();
            dependent[oneHotIdx] = 1.0;
        } else {
            // Regression (currently only single class)
            dependent[0] = current.classValue();
        }

        INDArray row = Nd4j.create(independent);
        data.putRow(i, row);
        outcomes.putRow(i, Nd4j.create(dependent));
    }
    return new DataSet(data, outcomes);
}
Example 8
Source File: WorkspaceTests.java From deeplearning4j with Apache License 2.0
@Test
public void testWithPreprocessorsCG() {
    //https://github.com/deeplearning4j/deeplearning4j/issues/4347
    //Cause for the above issue was layerVertex.setInput() applying the preprocessor, with the result
    // not being detached properly from the workspace...

    for (WorkspaceMode wm : WorkspaceMode.values()) {
        System.out.println(wm);
        ComputationGraphConfiguration conf = new NeuralNetConfiguration.Builder()
                .trainingWorkspaceMode(wm)
                .inferenceWorkspaceMode(wm)
                .graphBuilder()
                .addInputs("in")
                .addLayer("e", new GravesLSTM.Builder().nIn(10).nOut(5).build(), new DupPreProcessor(), "in")
                // .addLayer("e", new GravesLSTM.Builder().nIn(10).nOut(5).build(), "in") //Note that no preprocessor is OK
                .addLayer("rnn", new GravesLSTM.Builder().nIn(5).nOut(8).build(), "e")
                .addLayer("out", new RnnOutputLayer.Builder(LossFunctions.LossFunction.MSE)
                        .activation(Activation.SIGMOID).nOut(3).build(), "rnn")
                .setInputTypes(InputType.recurrent(10))
                .setOutputs("out")
                .build();

        ComputationGraph cg = new ComputationGraph(conf);
        cg.init();

        INDArray[] input = new INDArray[]{Nd4j.zeros(1, 10, 5)};

        for (boolean train : new boolean[]{false, true}) {
            cg.clear();
            cg.feedForward(input, train);
        }

        cg.setInputs(input);
        cg.setLabels(Nd4j.rand(new int[]{1, 3, 5}));
        cg.computeGradientAndScore();
    }
}
Example 9
Source File: CudaGridExecutioner.java From nd4j with Apache License 2.0
protected void buildZ(Accumulation op, int... dimension) {
    Arrays.sort(dimension);

    for (int i = 0; i < dimension.length; i++) {
        if (dimension[i] < 0)
            dimension[i] += op.x().rank;
    }

    //do op along all dimensions
    if (dimension.length == op.x().rank())
        dimension = new int[] {Integer.MAX_VALUE};

    long[] retShape = Shape.wholeArrayDimension(dimension) ? new long[] {1, 1}
            : ArrayUtil.removeIndex(op.x().shape(), dimension);

    //ensure vector is proper shape
    if (retShape.length == 1) {
        if (dimension[0] == 0)
            retShape = new long[] {1, retShape[0]};
        else
            retShape = new long[] {retShape[0], 1};
    } else if (retShape.length == 0) {
        retShape = new long[] {1, 1};
    }

    /*
    if(op.x().isVector() && op.x().length() == ArrayUtil.prod(retShape))
        return op.noOp();
    */

    INDArray ret = null;
    if (op.z() == null || op.z() == op.x()) {
        if (op.isComplexAccumulation()) {
            val xT = op.x().tensorssAlongDimension(dimension);
            val yT = op.y().tensorssAlongDimension(dimension);

            ret = Nd4j.create(xT, yT);
        } else {
            if (Math.abs(op.zeroDouble()) < Nd4j.EPS_THRESHOLD) {
                ret = Nd4j.zeros(retShape);
            } else {
                ret = Nd4j.valueArrayOf(retShape, op.zeroDouble());
            }
        }
        op.setZ(ret);
    } else {
        // compare length
        if (op.z().lengthLong() != ArrayUtil.prodLong(retShape))
            throw new ND4JIllegalStateException("Shape of target array for reduction ["
                    + Arrays.toString(op.z().shape()) + "] doesn't match expected ["
                    + Arrays.toString(retShape) + "]");

        if (op.x().data().dataType() == DataBuffer.Type.DOUBLE) {
            op.z().assign(op.zeroDouble());
        } else if (op.x().data().dataType() == DataBuffer.Type.FLOAT) {
            op.z().assign(op.zeroFloat());
        } else if (op.x().data().dataType() == DataBuffer.Type.HALF) {
            op.z().assign(op.zeroHalf());
        }

        ret = op.z();
    }
}
Example 10
Source File: AtomicAllocatorTest.java From nd4j with Apache License 2.0
@Override
public void run() {
    log.info(this.getName() + "/" + this.getId() + " started on device ["
            + AtomicAllocator.getInstance().getDeviceId() + "]");
    AtomicLong cnt = new AtomicLong(0);
    AtomicLong cntX = new AtomicLong(0);

    while (true) {
        INDArray array1 = Nd4j.zeros(15, 15);
        INDArray array2 = Nd4j.create(new float[]{2.0f, 2.0f, 2.0f, 2.0f, 2.0f, 2.0f, 2.0f, 2.0f,
                2.0f, 2.0f, 2.0f, 2.0f, 2.0f, 2.0f, 2.0f});

        int idx = 0;
        long time1 = 0;
        long time2 = 0;
        for (int x = 0; x < 30; x++) {
            time1 = System.nanoTime();
            array1.addiRowVector(array2);
            time2 = System.nanoTime();
            cntX.incrementAndGet();
        }

        if (cnt.incrementAndGet() % 1000 == 0) {
            log.info("AddiRowVector execution time: [" + (time2 - time1) + "] ns on device ["
                    + allocator.getDeviceId(array1) + "]");
            for (int y = 0; y < 15; y++) {
                for (int x = 0; x < 15; x++) {
                    assertEquals(60.0f, array1.getRow(y).getFloat(x), 0.01);
                }
            }

            if (threadId == 0) {
                log.info("Total calls: " + cntX.get() * 4);
                log.info("Total memory allocated on device [0]: " + allocator.getTotalAllocatedDeviceMemory(0));
            }

            try {
                Thread.sleep(5000);
            } catch (Exception e) {
                throw new RuntimeException(e);
            }
        }
    }
}
Example 11
Source File: CNNGradientCheckTest.java From deeplearning4j with Apache License 2.0
@Test
public void testCnnWithSubsamplingV2() {
    Nd4j.getRandom().setSeed(12345);
    int nOut = 4;

    int[] minibatchSizes = {1, 3};
    int width = 5;
    int height = 5;
    int inputDepth = 1;

    int[] kernel = {2, 2};
    int[] stride = {1, 1};
    int[] padding = {0, 0};
    int pNorm = 3;

    Activation[] activations = {Activation.SIGMOID, Activation.TANH};
    SubsamplingLayer.PoolingType[] poolingTypes =
            new SubsamplingLayer.PoolingType[]{SubsamplingLayer.PoolingType.MAX,
                    SubsamplingLayer.PoolingType.AVG, SubsamplingLayer.PoolingType.PNORM};

    boolean nchw = format == CNN2DFormat.NCHW;

    for (Activation afn : activations) {
        for (SubsamplingLayer.PoolingType poolingType : poolingTypes) {
            for (int minibatchSize : minibatchSizes) {
                long[] inShape = nchw ? new long[]{minibatchSize, inputDepth, height, width}
                        : new long[]{minibatchSize, height, width, inputDepth};
                INDArray input = Nd4j.rand(DataType.DOUBLE, inShape);
                INDArray labels = Nd4j.zeros(minibatchSize, nOut);
                for (int i = 0; i < minibatchSize; i++) {
                    labels.putScalar(new int[]{i, i % nOut}, 1.0);
                }

                MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder().updater(new NoOp())
                        .dataType(DataType.DOUBLE)
                        .dist(new NormalDistribution(0, 1))
                        .list()
                        .layer(0, new ConvolutionLayer.Builder(kernel, stride, padding).nIn(inputDepth)
                                .nOut(3).build()) //output: (5-2+0)/1+1 = 4
                        .layer(1, new SubsamplingLayer.Builder(poolingType)
                                .kernelSize(kernel).stride(stride).padding(padding)
                                .pnorm(pNorm).build()) //output: (4-2+0)/1+1 = 3 -> 3x3x3
                        .layer(2, new ConvolutionLayer.Builder(kernel, stride, padding)
                                .nIn(3).nOut(2).build()) //output: (3-2+0)/1+1 = 2
                        .layer(3, new OutputLayer.Builder(LossFunctions.LossFunction.MCXENT)
                                .activation(Activation.SOFTMAX).nIn(2 * 2 * 2)
                                .nOut(4).build())
                        .setInputType(InputType.convolutional(height, width, inputDepth, format))
                        .build();

                MultiLayerNetwork net = new MultiLayerNetwork(conf);
                net.init();

                String msg = "PoolingType=" + poolingType + ", minibatch=" + minibatchSize
                        + ", activationFn=" + afn;
                System.out.println(msg);

                boolean gradOK = GradientCheckUtil.checkGradients(net, DEFAULT_EPS, DEFAULT_MAX_REL_ERROR,
                        DEFAULT_MIN_ABS_ERROR, PRINT_RESULTS, RETURN_ON_FIRST_FAILURE, input, labels);

                assertTrue(msg, gradOK);

                TestUtils.testModelSerialization(net);
            }
        }
    }
}
Example 12
Source File: ROCTest.java From deeplearning4j with Apache License 2.0
@Test
public void testCompare2Vs3Classes() {
    //ROC multi-class: 2 vs. 3 classes should be the same, if we add two of the classes together...
    //Both methods implement one vs. all ROC/AUC in different ways

    int nExamples = 200;
    INDArray predictions3 = Nd4j.rand(nExamples, 3);
    INDArray tempSum = predictions3.sum(1);
    predictions3.diviColumnVector(tempSum);

    INDArray labels3 = Nd4j.create(nExamples, 3);
    Random r = new Random(12345);
    for (int i = 0; i < nExamples; i++) {
        labels3.putScalar(i, r.nextInt(3), 1.0);
    }

    INDArray predictions2 = Nd4j.zeros(nExamples, 2);
    predictions2.getColumn(0).assign(predictions3.getColumn(0));
    predictions2.getColumn(0).addi(predictions3.getColumn(1));
    predictions2.getColumn(1).addi(predictions3.getColumn(2));

    INDArray labels2 = Nd4j.zeros(nExamples, 2);
    labels2.getColumn(0).assign(labels3.getColumn(0));
    labels2.getColumn(0).addi(labels3.getColumn(1));
    labels2.getColumn(1).addi(labels3.getColumn(2));

    for (int numSteps : new int[] {30, 0}) { //Steps = 0: exact
        ROCMultiClass rocMultiClass3 = new ROCMultiClass(numSteps);
        ROCMultiClass rocMultiClass2 = new ROCMultiClass(numSteps);

        rocMultiClass3.eval(labels3, predictions3);
        rocMultiClass2.eval(labels2, predictions2);

        double auc3 = rocMultiClass3.calculateAUC(2);
        double auc2 = rocMultiClass2.calculateAUC(1);

        assertEquals(auc2, auc3, 1e-6);

        RocCurve c3 = rocMultiClass3.getRocCurve(2);
        RocCurve c2 = rocMultiClass2.getRocCurve(1);

        assertArrayEquals(c2.getThreshold(), c3.getThreshold(), 1e-6);
        assertArrayEquals(c2.getFpr(), c3.getFpr(), 1e-6);
        assertArrayEquals(c2.getTpr(), c3.getTpr(), 1e-6);
    }
}
Example 13
Source File: BooleanIndexingTest.java From deeplearning4j with Apache License 2.0
/**
 * This test fails because it highlights the current mechanics of SpecifiedIndex.
 * Internally there's
 *
 * @throws Exception
 */
@Test
public void testSliceAssign1() {
    INDArray array = Nd4j.zeros(4, 4);
    INDArray patch = Nd4j.create(new float[] {1e-5f, 1e-5f, 1e-5f});

    INDArray slice = array.slice(1);
    int[] idx = new int[] {0, 1, 3};
    INDArrayIndex[] range = new INDArrayIndex[] {new SpecifiedIndex(idx)};

    INDArray subarray = slice.get(range);
    //System.out.println("Subarray: " + Arrays.toString(subarray.data().asFloat()) + " isView: " + subarray.isView());

    slice.put(range, patch);
    //System.out.println("Array after being patched: " + Arrays.toString(array.data().asFloat()));

    assertFalse(BooleanIndexing.and(array, Conditions.equals(0f)));
}
Example 14
Source File: BatchNormalizationTest.java From deeplearning4j with Apache License 2.0
@Test
public void testCnnForwardBackward() {
    double eps = 1e-5;
    int nIn = 4;
    int hw = 3;
    int minibatch = 2;
    Nd4j.getRandom().setSeed(12345);
    INDArray input = Nd4j.rand('c', new int[]{minibatch, nIn, hw, hw});

    //TODO: other values for gamma/beta
    INDArray gamma = Nd4j.ones(1, nIn);
    INDArray beta = Nd4j.zeros(1, nIn);

    Layer l = getLayer(nIn, eps, false, -1, -1);

    INDArray mean = input.mean(0, 2, 3);
    INDArray var = input.var(false, 0, 2, 3);
    INDArray xHat = Nd4j.getExecutioner().exec(new BroadcastSubOp(input, mean, input.dup(), 1));
    Nd4j.getExecutioner().exec(new BroadcastDivOp(xHat, Transforms.sqrt(var.add(eps), true), xHat, 1));

    INDArray outExpected = Nd4j.getExecutioner().exec(new BroadcastMulOp(xHat, gamma, xHat.dup(), 1));
    Nd4j.getExecutioner().exec(new BroadcastAddOp(outExpected, beta, outExpected, 1));

    INDArray out = l.activate(input, true, LayerWorkspaceMgr.noWorkspaces());

    // System.out.println(Arrays.toString(outExpected.data().asDouble()));
    // System.out.println(Arrays.toString(out.data().asDouble()));

    assertEquals(outExpected, out);

    //-------------------------------------------------------------
    //Check backprop
    INDArray epsilon = Nd4j.rand('c', new int[]{minibatch, nIn, hw, hw}); //dL/dy

    int effectiveMinibatch = minibatch * hw * hw;

    INDArray dldgammaExp = epsilon.mul(xHat).sum(0, 2, 3);
    dldgammaExp = dldgammaExp.reshape(1, dldgammaExp.length());
    INDArray dldbetaExp = epsilon.sum(0, 2, 3);
    dldbetaExp = dldbetaExp.reshape(1, dldbetaExp.length());

    INDArray dldxhat = Nd4j.getExecutioner().exec(new BroadcastMulOp(epsilon, gamma, epsilon.dup(), 1)); //epsilon.mulRowVector(gamma);

    INDArray inputSubMean = Nd4j.getExecutioner().exec(new BroadcastSubOp(input, mean, input.dup(), 1));

    INDArray dldvar = dldxhat.mul(inputSubMean).mul(-0.5);
    dldvar = Nd4j.getExecutioner().exec(
            new BroadcastMulOp(dldvar, Transforms.pow(var.add(eps), -3.0 / 2.0, true), dldvar.dup(), 1));
    dldvar = dldvar.sum(0, 2, 3);

    INDArray dldmu = Nd4j.getExecutioner()
            .exec(new BroadcastMulOp(dldxhat, Transforms.pow(var.add(eps), -1.0 / 2.0, true), dldxhat.dup(), 1))
            .neg().sum(0, 2, 3);
    dldmu = dldmu.add(dldvar.mul(inputSubMean.mul(-2.0).sum(0, 2, 3).div(effectiveMinibatch)));

    INDArray dldinExp = Nd4j.getExecutioner().exec(
            new BroadcastMulOp(dldxhat, Transforms.pow(var.add(eps), -1.0 / 2.0, true), dldxhat.dup(), 1));
    dldinExp = dldinExp.add(Nd4j.getExecutioner().exec(
            new BroadcastMulOp(inputSubMean.mul(2.0 / effectiveMinibatch), dldvar, inputSubMean.dup(), 1)));
    dldinExp = Nd4j.getExecutioner().exec(
            new BroadcastAddOp(dldinExp, dldmu.mul(1.0 / effectiveMinibatch), dldinExp.dup(), 1));

    Pair<Gradient, INDArray> p = l.backpropGradient(epsilon, LayerWorkspaceMgr.noWorkspaces());

    INDArray dldgamma = p.getFirst().getGradientFor("gamma");
    INDArray dldbeta = p.getFirst().getGradientFor("beta");

    assertEquals(dldgammaExp, dldgamma);
    assertEquals(dldbetaExp, dldbeta);

    // System.out.println("EPSILONS");
    // System.out.println(Arrays.toString(dldinExp.data().asDouble()));
    // System.out.println(Arrays.toString(p.getSecond().dup().data().asDouble()));

    assertEquals(dldinExp, p.getSecond());
}
Example 15
Source File: DTypeTests.java From deeplearning4j with Apache License 2.0
@Test
public void testCapsNetDtypes() {
    for (DataType globalDtype : new DataType[]{DataType.DOUBLE, DataType.FLOAT, DataType.HALF}) {
        Nd4j.setDefaultDataTypes(globalDtype, globalDtype);
        for (DataType networkDtype : new DataType[]{DataType.DOUBLE, DataType.FLOAT, DataType.HALF}) {
            assertEquals(globalDtype, Nd4j.dataType());
            assertEquals(globalDtype, Nd4j.defaultFloatingPointType());

            String msg = "Global dtype: " + globalDtype + ", network dtype: " + networkDtype;

            int primaryCapsDim = 2;
            int primarpCapsChannel = 8;
            int capsule = 5;
            int minibatchSize = 8;
            int routing = 1;
            int capsuleDim = 4;
            int height = 6;
            int width = 6;
            int inputDepth = 4;

            MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder()
                    .dataType(networkDtype)
                    .seed(123)
                    .updater(new NoOp())
                    .weightInit(new WeightInitDistribution(new UniformDistribution(-6, 6)))
                    .list()
                    .layer(new PrimaryCapsules.Builder(primaryCapsDim, primarpCapsChannel)
                            .kernelSize(3, 3)
                            .stride(2, 2)
                            .build())
                    .layer(new CapsuleLayer.Builder(capsule, capsuleDim, routing).build())
                    .layer(new CapsuleStrengthLayer.Builder().build())
                    .layer(new ActivationLayer.Builder(new ActivationSoftmax()).build())
                    .layer(new LossLayer.Builder(new LossNegativeLogLikelihood()).build())
                    .setInputType(InputType.convolutional(height, width, inputDepth))
                    .build();

            MultiLayerNetwork net = new MultiLayerNetwork(conf);
            net.init();

            INDArray in = Nd4j.rand(networkDtype, minibatchSize, inputDepth * height * width).mul(10)
                    .reshape(-1, inputDepth, height, width);
            INDArray label = Nd4j.zeros(networkDtype, minibatchSize, capsule);
            for (int i = 0; i < minibatchSize; i++) {
                label.putScalar(new int[]{i, i % capsule}, 1.0);
            }

            INDArray out = net.output(in);
            assertEquals(msg, networkDtype, out.dataType());
            List<INDArray> ff = net.feedForward(in);
            for (int i = 0; i < ff.size(); i++) {
                String s = msg + " - layer " + (i - 1) + " - "
                        + (i == 0 ? "input" : net.getLayer(i - 1).conf().getLayer().getClass().getSimpleName());
                assertEquals(s, networkDtype, ff.get(i).dataType());
            }

            net.setInput(in);
            net.setLabels(label);
            net.computeGradientAndScore();

            net.fit(new DataSet(in, label));

            logUsedClasses(net);

            //Now, test mismatched dtypes for input/labels:
            for (DataType inputLabelDtype : new DataType[]{DataType.DOUBLE, DataType.FLOAT, DataType.HALF}) {
                INDArray in2 = in.castTo(inputLabelDtype);
                INDArray label2 = label.castTo(inputLabelDtype);
                net.output(in2);
                net.setInput(in2);
                net.setLabels(label2);
                net.computeGradientAndScore();

                net.fit(new DataSet(in2, label2));
            }
        }
    }
}
Example 16
Source File: OutputLayerTest.java From deeplearning4j with Apache License 2.0
@Test
public void testOutputLayersRnnForwardPass() {
    //Test output layer with RNNs. Expect all outputs etc. to be 2d
    int nIn = 2;
    int nOut = 5;
    int layerSize = 4;
    int timeSeriesLength = 6;
    int miniBatchSize = 3;

    Random r = new Random(12345L);
    INDArray input = Nd4j.zeros(miniBatchSize, nIn, timeSeriesLength);
    for (int i = 0; i < miniBatchSize; i++) {
        for (int j = 0; j < nIn; j++) {
            for (int k = 0; k < timeSeriesLength; k++) {
                input.putScalar(new int[] {i, j, k}, r.nextDouble() - 0.5);
            }
        }
    }

    MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder().seed(12345L).list()
            .layer(0, new GravesLSTM.Builder().nIn(nIn).nOut(layerSize)
                    .dist(new NormalDistribution(0, 1)).activation(Activation.TANH)
                    .updater(new NoOp()).build())
            .layer(1, new org.deeplearning4j.nn.conf.layers.OutputLayer.Builder(LossFunction.MCXENT)
                    .activation(Activation.SOFTMAX).nIn(layerSize).nOut(nOut)
                    .dist(new NormalDistribution(0, 1))
                    .updater(new NoOp()).build())
            .inputPreProcessor(1, new RnnToFeedForwardPreProcessor()).build();

    MultiLayerNetwork mln = new MultiLayerNetwork(conf);
    mln.init();

    INDArray out2d = mln.feedForward(input).get(2);
    assertArrayEquals(out2d.shape(), new long[] {miniBatchSize * timeSeriesLength, nOut});

    INDArray out = mln.output(input);
    assertArrayEquals(out.shape(), new long[] {miniBatchSize * timeSeriesLength, nOut});

    INDArray preout = mln.output(input);
    assertArrayEquals(preout.shape(), new long[] {miniBatchSize * timeSeriesLength, nOut});

    //As above, but for RnnOutputLayer. Expect all activations etc. to be 3d
    MultiLayerConfiguration confRnn = new NeuralNetConfiguration.Builder().seed(12345L).list()
            .layer(0, new GravesLSTM.Builder().nIn(nIn).nOut(layerSize)
                    .dist(new NormalDistribution(0, 1)).activation(Activation.TANH)
                    .updater(new NoOp()).build())
            .layer(1, new org.deeplearning4j.nn.conf.layers.RnnOutputLayer.Builder(LossFunction.MCXENT)
                    .activation(Activation.SOFTMAX).nIn(layerSize).nOut(nOut)
                    .dist(new NormalDistribution(0, 1))
                    .updater(new NoOp()).build())
            .build();

    MultiLayerNetwork mlnRnn = new MultiLayerNetwork(confRnn);
    mlnRnn.init();

    INDArray out3d = mlnRnn.feedForward(input).get(2);
    assertArrayEquals(out3d.shape(), new long[] {miniBatchSize, nOut, timeSeriesLength});

    INDArray outRnn = mlnRnn.output(input);
    assertArrayEquals(outRnn.shape(), new long[] {miniBatchSize, nOut, timeSeriesLength});

    INDArray preoutRnn = mlnRnn.output(input);
    assertArrayEquals(preoutRnn.shape(), new long[] {miniBatchSize, nOut, timeSeriesLength});
}
Example 17
Source File: MultiDataSetTest.java From nd4j with Apache License 2.0
@Test
public void testToString() {
    //Mask arrays, and different lengths

    int tsLengthIn0 = 8;
    int tsLengthIn1 = 9;
    int tsLengthOut0 = 10;
    int tsLengthOut1 = 11;

    int nRows = 5;
    int nColsIn0 = 3;
    int nColsIn1 = 4;
    int nColsOut0 = 5;
    int nColsOut1 = 6;

    INDArray expectedIn0 = Nd4j.zeros(nRows, nColsIn0, tsLengthIn0);
    INDArray expectedIn1 = Nd4j.zeros(nRows, nColsIn1, tsLengthIn1);
    INDArray expectedOut0 = Nd4j.zeros(nRows, nColsOut0, tsLengthOut0);
    INDArray expectedOut1 = Nd4j.zeros(nRows, nColsOut1, tsLengthOut1);
    INDArray expectedMaskIn0 = Nd4j.zeros(nRows, tsLengthIn0);
    INDArray expectedMaskIn1 = Nd4j.zeros(nRows, tsLengthIn1);
    INDArray expectedMaskOut0 = Nd4j.zeros(nRows, tsLengthOut0);
    INDArray expectedMaskOut1 = Nd4j.zeros(nRows, tsLengthOut1);

    Random r = new Random(12345);
    List<MultiDataSet> list = new ArrayList<>(nRows);
    for (int i = 0; i < nRows; i++) {
        int thisRowIn0Length = tsLengthIn0 - i;
        int thisRowIn1Length = tsLengthIn1 - i;
        int thisRowOut0Length = tsLengthOut0 - i;
        int thisRowOut1Length = tsLengthOut1 - i;

        int in0NumElem = thisRowIn0Length * nColsIn0;
        INDArray in0 = Nd4j.linspace(0, in0NumElem - 1, in0NumElem).reshape(1, nColsIn0, thisRowIn0Length);

        int in1NumElem = thisRowIn1Length * nColsIn1;
        INDArray in1 = Nd4j.linspace(0, in1NumElem - 1, in1NumElem).reshape(1, nColsIn1, thisRowIn1Length);

        int out0NumElem = thisRowOut0Length * nColsOut0;
        INDArray out0 = Nd4j.linspace(0, out0NumElem - 1, out0NumElem).reshape(1, nColsOut0, thisRowOut0Length);

        int out1NumElem = thisRowOut1Length * nColsOut1;
        INDArray out1 = Nd4j.linspace(0, out1NumElem - 1, out1NumElem).reshape(1, nColsOut1, thisRowOut1Length);

        INDArray maskIn0 = null;
        INDArray maskIn1 = Nd4j.zeros(1, thisRowIn1Length);
        for (int j = 0; j < thisRowIn1Length; j++) {
            if (r.nextBoolean())
                maskIn1.putScalar(j, 1.0);
        }
        INDArray maskOut0 = null;
        INDArray maskOut1 = Nd4j.zeros(1, thisRowOut1Length);
        for (int j = 0; j < thisRowOut1Length; j++) {
            if (r.nextBoolean())
                maskOut1.putScalar(j, 1.0);
        }

        expectedIn0.put(new INDArrayIndex[] {NDArrayIndex.point(i), NDArrayIndex.all(),
                NDArrayIndex.interval(0, thisRowIn0Length)}, in0);
        expectedIn1.put(new INDArrayIndex[] {NDArrayIndex.point(i), NDArrayIndex.all(),
                NDArrayIndex.interval(0, thisRowIn1Length)}, in1);
        expectedOut0.put(new INDArrayIndex[] {NDArrayIndex.point(i), NDArrayIndex.all(),
                NDArrayIndex.interval(0, thisRowOut0Length)}, out0);
        expectedOut1.put(new INDArrayIndex[] {NDArrayIndex.point(i), NDArrayIndex.all(),
                NDArrayIndex.interval(0, thisRowOut1Length)}, out1);
        expectedMaskIn0.put(new INDArrayIndex[] {NDArrayIndex.point(i),
                NDArrayIndex.interval(0, thisRowIn0Length)}, Nd4j.ones(1, thisRowIn0Length));
        expectedMaskIn1.put(new INDArrayIndex[] {NDArrayIndex.point(i),
                NDArrayIndex.interval(0, thisRowIn1Length)}, maskIn1);
        expectedMaskOut0.put(new INDArrayIndex[] {NDArrayIndex.point(i),
                NDArrayIndex.interval(0, thisRowOut0Length)}, Nd4j.ones(1, thisRowOut0Length));
        expectedMaskOut1.put(new INDArrayIndex[] {NDArrayIndex.point(i),
                NDArrayIndex.interval(0, thisRowOut1Length)}, maskOut1);

        list.add(new MultiDataSet(new INDArray[] {in0, in1}, new INDArray[] {out0, out1},
                new INDArray[] {maskIn0, maskIn1}, new INDArray[] {maskOut0, maskOut1}));
    }

    MultiDataSet merged = MultiDataSet.merge(list);
    System.out.println(merged);
}
Example 18
Source File: NoBiasGradientCheckTests.java From deeplearning4j with Apache License 2.0
@Test
public void testGradientNoBiasEmbedding() {
    int nIn = 5;
    int nOut = 3;
    int layerSize = 6;

    for (int minibatch : new int[]{1, 4}) {
        INDArray input = Nd4j.zeros(minibatch, 1);
        for (int i = 0; i < minibatch; i++) {
            input.putScalar(i, 0, i % layerSize);
        }

        INDArray labels = Nd4j.zeros(minibatch, nOut);
        for (int i = 0; i < minibatch; i++) {
            labels.putScalar(i, i % nOut, 1.0);
        }

        for (boolean embeddingHasBias : new boolean[]{true, false}) {
            MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder()
                    .dataType(DataType.DOUBLE)
                    .updater(new NoOp())
                    .seed(12345L)
                    .list()
                    .layer(0, new EmbeddingLayer.Builder().nIn(nIn).nOut(layerSize)
                            .dist(new NormalDistribution(0, 1))
                            .activation(Activation.TANH)
                            .hasBias(embeddingHasBias)
                            .build())
                    .layer(1, new OutputLayer.Builder(LossFunction.MCXENT)
                            .activation(Activation.SOFTMAX).nIn(layerSize).nOut(nOut)
                            .dist(new NormalDistribution(0, 1))
                            .build())
                    .build();

            MultiLayerNetwork mln = new MultiLayerNetwork(conf);
            mln.init();

            if (embeddingHasBias) {
                assertEquals(nIn * layerSize + layerSize, mln.getLayer(0).numParams());
            } else {
                assertEquals(nIn * layerSize, mln.getLayer(0).numParams());
            }

            String msg = "testGradientNoBiasEmbedding(), minibatch = " + minibatch
                    + ", embeddingHasBias = " + embeddingHasBias + ")";

            if (PRINT_RESULTS) {
                System.out.println(msg);
            }

            boolean gradOK = GradientCheckUtil.checkGradients(mln, DEFAULT_EPS, DEFAULT_MAX_REL_ERROR,
                    DEFAULT_MIN_ABS_ERROR, PRINT_RESULTS, RETURN_ON_FIRST_FAILURE, input, labels);

            assertTrue(msg, gradOK);

            TestUtils.testModelSerialization(mln);
        }
    }
}
Example 19
Source File: CNNGradientCheckTest.java From deeplearning4j with Apache License 2.0
@Ignore
@Test
public void testCnnWithSpaceToDepth() {
    Nd4j.getRandom().setSeed(12345);
    int nOut = 4;
    int minibatchSize = 2;

    int width = 5;
    int height = 5;
    int inputDepth = 1;

    int[] kernel = {2, 2};
    int blocks = 2;

    String[] activations = {"sigmoid"};
    SubsamplingLayer.PoolingType[] poolingTypes =
            new SubsamplingLayer.PoolingType[]{SubsamplingLayer.PoolingType.MAX,
                    SubsamplingLayer.PoolingType.AVG, SubsamplingLayer.PoolingType.PNORM};

    for (String afn : activations) {
        for (SubsamplingLayer.PoolingType poolingType : poolingTypes) {
            INDArray input = Nd4j.rand(minibatchSize, width * height * inputDepth);
            INDArray labels = Nd4j.zeros(minibatchSize, nOut);
            for (int i = 0; i < minibatchSize; i++) {
                labels.putScalar(new int[]{i, i % nOut}, 1.0);
            }

            MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder()
                    .dataType(DataType.DOUBLE)
                    .updater(new NoOp())
                    .dist(new NormalDistribution(0, 1))
                    .list()
                    .layer(new ConvolutionLayer.Builder(kernel).nIn(inputDepth).hasBias(false)
                            .cudnnAllowFallback(false)
                            .nOut(1).build()) //output: (5-2+0)/1+1 = 4
                    .layer(new SpaceToDepthLayer.Builder(blocks, SpaceToDepthLayer.DataFormat.NCHW)
                            .build()) // (mb,1,4,4) -> (mb,4,2,2)
                    .layer(new OutputLayer.Builder(LossFunctions.LossFunction.MCXENT)
                            .activation(Activation.SOFTMAX).nIn(2 * 2 * 4)
                            .nOut(nOut).build())
                    .setInputType(InputType.convolutionalFlat(height, width, inputDepth))
                    .build();

            MultiLayerNetwork net = new MultiLayerNetwork(conf);
            net.init();

            String msg = "PoolingType=" + poolingType + ", minibatch=" + minibatchSize
                    + ", activationFn=" + afn;

            if (PRINT_RESULTS) {
                System.out.println(msg);
                // for (int j = 0; j < net.getnLayers(); j++)
                //     System.out.println("Layer " + j + " # params: " + net.getLayer(j).numParams());
            }

            boolean gradOK = GradientCheckUtil.checkGradients(net, DEFAULT_EPS, DEFAULT_MAX_REL_ERROR,
                    DEFAULT_MIN_ABS_ERROR, PRINT_RESULTS, RETURN_ON_FIRST_FAILURE, input, labels);

            assertTrue(msg, gradOK);

            TestUtils.testModelSerialization(net);
        }
    }
}
Example 20
Source File: QLearning.java From dl4j-tutorials with MIT License
public static INDArray buildQTable() {
    return Nd4j.zeros(N_STATE, ACTIONS.length);
}