Java Code Examples for org.nd4j.linalg.factory.Nd4j#concat()
The following examples show how to use org.nd4j.linalg.factory.Nd4j#concat(), drawn from several open-source projects. Each example lists its source file, project, and license.
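Before the project examples, here is a minimal, self-contained sketch of the method's behavior. Nd4j.concat takes the dimension to join along as its first argument, followed by the arrays to concatenate; the shapes and values below are chosen purely for illustration and are not taken from any of the projects that follow.

import org.nd4j.linalg.api.ndarray.INDArray;
import org.nd4j.linalg.factory.Nd4j;

public class ConcatDemo {
    public static void main(String[] args) {
        INDArray a = Nd4j.zeros(2, 3); // shape [2, 3]
        INDArray b = Nd4j.ones(2, 3);  // shape [2, 3]

        // Dimension 0 stacks rows: result shape is [4, 3]
        INDArray byRows = Nd4j.concat(0, a, b);

        // Dimension 1 appends columns: result shape is [2, 6]
        INDArray byCols = Nd4j.concat(1, a, b);

        System.out.println(java.util.Arrays.toString(byRows.shape())); // [4, 3]
        System.out.println(java.util.Arrays.toString(byCols.shape())); // [2, 6]
    }
}

Every dimension except the concatenation dimension must match across the inputs; Example 13 below shows the ND4JIllegalStateException thrown when they do not.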
Example 1
Source File: DeepGL.java From ml-models with Apache License 2.0
private void diffuse(List<Pruning.Feature> featuresList) {
    INDArray ndDiffused = Nd4j.create(embedding.shape());
    Nd4j.copy(embedding, ndDiffused);

    featuresList.addAll(featuresList);
    features = featuresList.toArray(new Pruning.Feature[0]);

    for (int i = features.length / 2; i < features.length; i++) {
        features[i] = new Pruning.Feature("diffuse", features[i]);
    }

    for (int diffIteration = 0; diffIteration < diffusionIterations; diffIteration++) {
        INDArray ndDiffusedTemp = Nd4j.create(embedding.shape());
        nodeQueue.set(0);
        final ArrayList<Future<?>> futures = new ArrayList<>();
        for (int i = 0; i < concurrency; i++) {
            futures.add(executorService.submit(new DiffusionTask(ndDiffused, ndDiffusedTemp)));
        }
        ParallelUtil.awaitTermination(futures);
        ndDiffused = ndDiffusedTemp;
    }

    embedding = Nd4j.concat(1, embedding, ndDiffused);
}
Example 2
Source File: LossCurve.java From deeplearning4j with Apache License 2.0
/**
 * Return a new LossCurve with the given losses added on as the most recent epoch
 */
public LossCurve addLossAndCopy(double[] values, List<String> lossNames) {
    return new LossCurve(
            Nd4j.concat(0, lossValues,
                    Nd4j.createFromArray(new double[][]{values}).castTo(DataType.FLOAT)),
            lossNames);
}
Example 3
Source File: ConcatTests.java From nd4j with Apache License 2.0
@Test
public void testConcat() {
    INDArray A = Nd4j.linspace(1, 8, 8).reshape(2, 2, 2);
    INDArray B = Nd4j.linspace(1, 12, 12).reshape(3, 2, 2);
    INDArray concat = Nd4j.concat(0, A, B);

    assertTrue(Arrays.equals(new long[] {5, 2, 2}, concat.shape()));
}
Example 4
Source File: ConcatTestsC.java From nd4j with Apache License 2.0
@Test
public void testConcatScalars() {
    INDArray first = Nd4j.arange(0, 1).reshape(1, 1);
    INDArray second = Nd4j.arange(0, 1).reshape(1, 1);

    INDArray firstRet = Nd4j.concat(0, first, second);
    assertTrue(firstRet.isColumnVector());

    INDArray secondRet = Nd4j.concat(1, first, second);
    assertTrue(secondRet.isRowVector());
}
Example 5
Source File: IndexingTestsC.java From nd4j with Apache License 2.0
@Test
public void testConcatColumns() {
    INDArray input1 = Nd4j.zeros(2, 1);
    INDArray input2 = Nd4j.ones(2, 1);
    INDArray concat = Nd4j.concat(1, input1, input2);
    INDArray assertion = Nd4j.create(new double[][] {{0, 1}, {0, 1}});
    assertEquals(assertion, concat);
}
Example 6
Source File: NormalizerStandardizeTest.java From nd4j with Apache License 2.0
@Test
public void testUnderOverflow() {
    // This dataset will be basically constant with a small std deviation,
    // and the constant is large. Checks whether the algorithm can handle this.
    double tolerancePerc = 1; // within 1%
    double toleranceAbs = 0.0005;
    int nSamples = 1000;
    int bSize = 10;
    int x = -1000000, y = 1000000;
    double z = 1000000;

    INDArray featureX = Nd4j.rand(nSamples, 1).mul(1).add(x);
    INDArray featureY = Nd4j.rand(nSamples, 1).mul(2).add(y);
    INDArray featureZ = Nd4j.rand(nSamples, 1).mul(3).add(z);
    INDArray featureSet = Nd4j.concat(1, featureX, featureY, featureZ);
    INDArray labelSet = Nd4j.zeros(nSamples, 1);
    DataSet sampleDataSet = new DataSet(featureSet, labelSet);
    DataSetIterator sampleIter = new TestDataSetIterator(sampleDataSet, bSize);

    INDArray theoreticalMean = Nd4j.create(new double[] {x, y, z});

    NormalizerStandardize myNormalizer = new NormalizerStandardize();
    myNormalizer.fit(sampleIter);

    INDArray meanDelta = Transforms.abs(theoreticalMean.sub(myNormalizer.getMean()));
    INDArray meanDeltaPerc = meanDelta.mul(100).div(theoreticalMean);
    assertTrue(meanDeltaPerc.max(1).getDouble(0, 0) < tolerancePerc);

    // this just has to not barf
    //myNormalizer.transform(sampleIter);
    myNormalizer.transform(sampleDataSet);
}
Example 7
Source File: ConcatTests.java From nd4j with Apache License 2.0
@Test
public void testConcatScalars() {
    INDArray first = Nd4j.arange(0, 1).reshape(1, 1);
    INDArray second = Nd4j.arange(0, 1).reshape(1, 1);

    INDArray firstRet = Nd4j.concat(0, first, second);
    assertTrue(firstRet.isColumnVector());

    INDArray secondRet = Nd4j.concat(1, first, second);
    assertTrue(secondRet.isRowVector());
}
Example 8
Source File: TransitionTest.java From deeplearning4j with Apache License 2.0
private Observation buildNextObservation(double[][] obs, double[] nextObs) {
    INDArray[] nextHistory = new INDArray[] {
            Nd4j.create(nextObs).reshape(1, 3),
            Nd4j.create(obs[0]).reshape(1, 3),
            Nd4j.create(obs[1]).reshape(1, 3),
    };
    return new Observation(Nd4j.concat(0, nextHistory));
}
Example 9
Source File: SpecialTests.java From deeplearning4j with Apache License 2.0
@Test
public void testConcatAgain() {
    INDArray[] toConcat = new INDArray[3];
    for (int i = 0; i < toConcat.length; i++) {
        toConcat[i] = Nd4j.valueArrayOf(new long[]{10, 1}, i).castTo(DataType.FLOAT);
    }

    INDArray out = Nd4j.concat(1, toConcat);
    // System.out.println(out);
}
Example 10
Source File: ConcatTestsC.java From deeplearning4j with Apache License 2.0
@Test
public void testConcatScalars1() {
    INDArray first = Nd4j.scalar(1);
    INDArray second = Nd4j.scalar(2);
    INDArray third = Nd4j.scalar(3);

    INDArray result = Nd4j.concat(0, first, second, third);

    assertEquals(1f, result.getFloat(0), 0.01f);
    assertEquals(2f, result.getFloat(1), 0.01f);
    assertEquals(3f, result.getFloat(2), 0.01f);
}
Example 11
Source File: Dl4jMlpClassifier.java From wekaDeeplearning4j with GNU General Public License v3.0
/**
 * Uses the given set of layers to extract features for the given dataset
 *
 * @param layerNames  names of the layers to extract features from
 * @param input       data to featurize
 * @param poolingType pooling type to use
 * @return Instances transformed to the image features
 */
public Instances getActivationsAtLayers(String[] layerNames, Instances input,
        PoolingType poolingType) throws Exception {
    DataSetIterator iter = getDataSetIterator(input);
    INDArray result = null;
    Map<String, Long> attributesPerLayer = new LinkedHashMap<>();

    log.info("Getting features from layers: " + Arrays.toString(layerNames));

    for (String layerName : layerNames) {
        if (attributesPerLayer.containsKey(layerName)) {
            log.warn("Concatenating two identical layers not supported");
            continue;
        }

        INDArray activationsAtLayer = featurizeForLayer(layerName, iter, poolingType);
        attributesPerLayer.put(layerName, activationsAtLayer.shape()[1]);

        if (result == null) {
            result = activationsAtLayer;
        } else {
            // Concatenate the activations of this layer with the other feature extraction layers
            result = Nd4j.concat(1, result, activationsAtLayer);
        }
    }

    result = Utils.appendClasses(result, input);
    return Utils.convertToInstances(result, input, attributesPerLayer);
}
Example 12
Source File: Utils.java From wekaDeeplearning4j with GNU General Public License v3.0
/**
 * Appends the input Instances classes to the INDArray
 *
 * @param result activations
 * @param input  original Instances
 * @return activations with class value appended
 */
public static INDArray appendClasses(INDArray result, Instances input) {
    INDArray classes = Nd4j.zeros(result.shape()[0], 1);
    for (int i = 0; i < classes.length(); i++) {
        Instance inst = input.instance(i);
        classes.putScalar(i, inst.classValue());
    }
    return Nd4j.concat(1, result, classes);
}
Example 13
Source File: ConcatTestsC.java From deeplearning4j with Apache License 2.0
@Test(expected = ND4JIllegalStateException.class)
public void testConcatVector() {
    // All dimensions except the concatenation dimension must match;
    // here the second dimensions (1000000 vs. 1) differ, so concat fails.
    Nd4j.concat(0, Nd4j.ones(1, 1000000), Nd4j.create(1, 1));
}
Example 14
Source File: NormalizerStandardizeLabelsTest.java From deeplearning4j with Apache License 2.0
@Test
public void testBruteForce() {
    /* This test creates a dataset where feature values are multiples of consecutive natural numbers.
       The obtained values are compared to the theoretical mean and std dev. */
    double tolerancePerc = 0.01;
    int nSamples = 5120;
    int x = 1, y = 2, z = 3;

    INDArray featureX = Nd4j.linspace(1, nSamples, nSamples).reshape(nSamples, 1).mul(x);
    INDArray featureY = featureX.mul(y);
    INDArray featureZ = featureX.mul(z);
    INDArray featureSet = Nd4j.concat(1, featureX, featureY, featureZ);
    INDArray labelSet = featureSet.dup().getColumns(0);
    DataSet sampleDataSet = new DataSet(featureSet, labelSet);

    double meanNaturalNums = (nSamples + 1) / 2.0;
    INDArray theoreticalMean = Nd4j.create(new double[] {meanNaturalNums * x, meanNaturalNums * y, meanNaturalNums * z})
            .reshape(1, -1).castTo(Nd4j.defaultFloatingPointType());
    INDArray theoreticallabelMean = theoreticalMean.dup().getColumns(0);
    double stdNaturalNums = Math.sqrt((nSamples * nSamples - 1) / 12.0);
    INDArray theoreticalStd = Nd4j.create(new double[] {stdNaturalNums * x, stdNaturalNums * y, stdNaturalNums * z})
            .reshape(1, -1).castTo(Nd4j.defaultFloatingPointType());
    INDArray theoreticallabelStd = theoreticalStd.dup().getColumns(0);

    NormalizerStandardize myNormalizer = new NormalizerStandardize();
    myNormalizer.fitLabel(true);
    myNormalizer.fit(sampleDataSet);

    INDArray meanDelta = Transforms.abs(theoreticalMean.sub(myNormalizer.getMean()));
    INDArray labelDelta = Transforms.abs(theoreticallabelMean.sub(myNormalizer.getLabelMean()));
    INDArray meanDeltaPerc = meanDelta.div(theoreticalMean).mul(100);
    INDArray labelDeltaPerc = labelDelta.div(theoreticallabelMean).mul(100);
    double maxMeanDeltaPerc = meanDeltaPerc.max(1).getDouble(0);
    assertTrue(maxMeanDeltaPerc < tolerancePerc);
    assertTrue(labelDeltaPerc.max(1).getDouble(0) < tolerancePerc);

    INDArray stdDelta = Transforms.abs(theoreticalStd.sub(myNormalizer.getStd()));
    INDArray stdDeltaPerc = stdDelta.div(theoreticalStd).mul(100);
    INDArray stdlabelDeltaPerc = Transforms.abs(theoreticallabelStd.sub(myNormalizer.getLabelStd())).div(theoreticallabelStd);
    double maxStdDeltaPerc = stdDeltaPerc.max(1).mul(100).getDouble(0);
    double maxlabelStdDeltaPerc = stdlabelDeltaPerc.max(1).getDouble(0);
    assertTrue(maxStdDeltaPerc < tolerancePerc);
    assertTrue(maxlabelStdDeltaPerc < tolerancePerc);

    // SAME TEST WITH THE ITERATOR
    int bSize = 10;
    tolerancePerc = 0.1; // 0.1% of correct value
    DataSetIterator sampleIter = new TestDataSetIterator(sampleDataSet, bSize);
    myNormalizer.fit(sampleIter);

    meanDelta = Transforms.abs(theoreticalMean.sub(myNormalizer.getMean()));
    meanDeltaPerc = meanDelta.div(theoreticalMean).mul(100);
    maxMeanDeltaPerc = meanDeltaPerc.max(1).getDouble(0);
    assertTrue(maxMeanDeltaPerc < tolerancePerc);

    stdDelta = Transforms.abs(theoreticalStd.sub(myNormalizer.getStd()));
    stdDeltaPerc = stdDelta.div(theoreticalStd).mul(100);
    maxStdDeltaPerc = stdDeltaPerc.max(1).getDouble(0);
    assertTrue(maxStdDeltaPerc < tolerancePerc);
}
Example 15
Source File: ConcatTests.java From nd4j with Apache License 2.0
@Test
public void testConcat3d() {
    INDArray first = Nd4j.linspace(1, 24, 24).reshape('c', 2, 3, 4);
    INDArray second = Nd4j.linspace(24, 36, 12).reshape('c', 1, 3, 4);
    INDArray third = Nd4j.linspace(36, 48, 12).reshape('c', 1, 3, 4);

    //ConcatV2, dim 0
    INDArray exp = Nd4j.create(2 + 1 + 1, 3, 4);
    exp.put(new INDArrayIndex[] {NDArrayIndex.interval(0, 2), NDArrayIndex.all(), NDArrayIndex.all()}, first);
    exp.put(new INDArrayIndex[] {NDArrayIndex.point(2), NDArrayIndex.all(), NDArrayIndex.all()}, second);
    exp.put(new INDArrayIndex[] {NDArrayIndex.point(3), NDArrayIndex.all(), NDArrayIndex.all()}, third);

    INDArray concat0 = Nd4j.concat(0, first, second, third);
    assertEquals(exp, concat0);

    //ConcatV2, dim 1
    second = Nd4j.linspace(24, 32, 8).reshape('c', 2, 1, 4);
    third = Nd4j.linspace(32, 48, 16).reshape('c', 2, 2, 4);

    exp = Nd4j.create(2, 3 + 1 + 2, 4);
    exp.put(new INDArrayIndex[] {NDArrayIndex.all(), NDArrayIndex.interval(0, 3), NDArrayIndex.all()}, first);
    exp.put(new INDArrayIndex[] {NDArrayIndex.all(), NDArrayIndex.point(3), NDArrayIndex.all()}, second);
    exp.put(new INDArrayIndex[] {NDArrayIndex.all(), NDArrayIndex.interval(4, 6), NDArrayIndex.all()}, third);

    INDArray concat1 = Nd4j.concat(1, first, second, third);
    assertEquals(exp, concat1);

    //ConcatV2, dim 2
    second = Nd4j.linspace(24, 36, 12).reshape('c', 2, 3, 2);
    third = Nd4j.linspace(36, 42, 6).reshape('c', 2, 3, 1);

    exp = Nd4j.create(2, 3, 4 + 2 + 1);
    exp.put(new INDArrayIndex[] {NDArrayIndex.all(), NDArrayIndex.all(), NDArrayIndex.interval(0, 4)}, first);
    exp.put(new INDArrayIndex[] {NDArrayIndex.all(), NDArrayIndex.all(), NDArrayIndex.interval(4, 6)}, second);
    exp.put(new INDArrayIndex[] {NDArrayIndex.all(), NDArrayIndex.all(), NDArrayIndex.point(6)}, third);

    INDArray concat2 = Nd4j.concat(2, first, second, third);
    assertEquals(exp, concat2);
}
Example 16
Source File: MultiLayerTest.java From deeplearning4j with Apache License 2.0
@Test
public void testMLNUpdaterBlocks() {
    //Check that setting learning rate results in correct rearrangement of updater state within updater blocks
    //https://github.com/deeplearning4j/deeplearning4j/issues/6809#issuecomment-463892644

    double lr = 1e-3;
    MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder()
            .seed(12345)
            .weightInit(WeightInit.XAVIER)
            .updater(new Adam(lr))
            .list()
            .layer(new DenseLayer.Builder().nIn(5).nOut(3).build())
            .layer(new DenseLayer.Builder().nIn(3).nOut(2).build())
            .layer(new OutputLayer.Builder(LossFunctions.LossFunction.XENT).nIn(2).nOut(1)
                    .activation(Activation.SIGMOID).build())
            .build();

    MultiLayerNetwork net = new MultiLayerNetwork(conf);
    net.init();

    INDArray in = Nd4j.rand(1, 5);
    INDArray lbl = Nd4j.rand(1, 1);

    net.fit(new DataSet(in, lbl));

    INDArray viewArray = net.getUpdater().getStateViewArray();
    INDArray viewArrayCopy = viewArray.dup();

    //Initially updater view array is set out like:
    //[m0w, m0b, m1w, m1b, m2w, m2b][v0w, v0b, v1w, v1b, v2w, v2b]
    long soFar = 0;
    INDArray m0w = viewArray.get(NDArrayIndex.interval(0,0,true), NDArrayIndex.interval(soFar, soFar+5*3)).assign(0);   //m0w
    soFar += 5*3;
    INDArray m0b = viewArray.get(NDArrayIndex.interval(0,0,true), NDArrayIndex.interval(soFar, soFar+3)).assign(1);     //m0b
    soFar += 3;
    INDArray m1w = viewArray.get(NDArrayIndex.interval(0,0,true), NDArrayIndex.interval(soFar, soFar+3*2)).assign(2);   //m1w
    soFar += 3*2;
    INDArray m1b = viewArray.get(NDArrayIndex.interval(0,0,true), NDArrayIndex.interval(soFar, soFar+2)).assign(3);     //m1b
    soFar += 2;
    INDArray m2w = viewArray.get(NDArrayIndex.interval(0,0,true), NDArrayIndex.interval(soFar, soFar+2*1)).assign(4);   //m2w
    soFar += 2*1;
    INDArray m2b = viewArray.get(NDArrayIndex.interval(0,0,true), NDArrayIndex.interval(soFar, soFar+1)).assign(5);     //m2b
    soFar += 1;
    INDArray v0w = viewArray.get(NDArrayIndex.interval(0,0,true), NDArrayIndex.interval(soFar, soFar+5*3)).assign(6);   //v0w
    soFar += 5*3;
    INDArray v0b = viewArray.get(NDArrayIndex.interval(0,0,true), NDArrayIndex.interval(soFar, soFar+3)).assign(7);     //v0b
    soFar += 3;
    INDArray v1w = viewArray.get(NDArrayIndex.interval(0,0,true), NDArrayIndex.interval(soFar, soFar+3*2)).assign(8);   //v1w
    soFar += 3*2;
    INDArray v1b = viewArray.get(NDArrayIndex.interval(0,0,true), NDArrayIndex.interval(soFar, soFar+2)).assign(9);     //v1b
    soFar += 2;
    INDArray v2w = viewArray.get(NDArrayIndex.interval(0,0,true), NDArrayIndex.interval(soFar, soFar+2*1)).assign(10);  //v2w
    soFar += 2*1;
    INDArray v2b = viewArray.get(NDArrayIndex.interval(0,0,true), NDArrayIndex.interval(soFar, soFar+1)).assign(11);    //v2b
    soFar += 1;

    net.setLearningRate(0, 0.0);

    //Expect new updater state to look like:
    //[m0w, m0b][v0w,v0b], [m1w, m1b, m2w, m2b][v1w, v1b, v2w, v2b]
    INDArray exp = Nd4j.concat(1, m0w, m0b, v0w, v0b, m1w, m1b, m2w, m2b, v1w, v1b, v2w, v2b);
    INDArray act = net.getUpdater().getStateViewArray();
    // System.out.println(exp);
    // System.out.println(act);
    assertEquals(exp, act);

    //And set layer 1 LR:
    net.setLearningRate(1, 0.2);
    exp = Nd4j.concat(1, m0w, m0b, v0w, v0b, m1w, m1b, v1w, v1b, m2w, m2b, v2w, v2b);
    assertEquals(exp, net.getUpdater().getStateViewArray());

    //Set all back to original LR and check again:
    net.setLearningRate(1, lr);
    net.setLearningRate(0, lr);
    exp = Nd4j.concat(1, m0w, m0b, m1w, m1b, m2w, m2b, v0w, v0b, v1w, v1b, v2w, v2b);
    assertEquals(exp, net.getUpdater().getStateViewArray());

    //Finally, training sanity check (if things are wrong, we get -ve values in adam V, which causes NaNs)
    net.getUpdater().getStateViewArray().assign(viewArrayCopy);
    net.setLearningRate(0, 0.0);

    Nd4j.getExecutioner().setProfilingMode(OpExecutioner.ProfilingMode.NAN_PANIC);
    net.fit(new DataSet(in, lbl));
    Nd4j.getExecutioner().setProfilingMode(OpExecutioner.ProfilingMode.SCOPE_PANIC);
}
Example 17
Source File: StackVertex.java From deeplearning4j with Apache License 2.0
@Override
public INDArray doForward(boolean training, LayerWorkspaceMgr workspaceMgr) {
    // stacking along dimension 0
    // inputs[] is an array of INDArray (e.g.: shape of 3 x [nExamples, nSize])
    // what we want to do is make a stacked output (e.g.: [3 x nExamples, nSize])
    lastInputShapes = null;
    int nStack = inputs.length;
    val inShape = inputs[0].shape();
    val outShape = new long[inShape.length];

    // create the new shape
    outShape[0] = nStack * inShape[0];
    for (int i = 1; i < inShape.length; i++) {
        outShape[i] = inShape[i];
    }

    boolean variableLengthTS = false;
    if (inShape.length == 3) {
        //RNN data - check for variable length time series
        long minLength = inputs[0].size(2);
        long maxLength = minLength;
        for (int i = 1; i < inputs.length; i++) {
            long thisLength = inputs[i].size(2);
            minLength = Math.min(minLength, thisLength);
            maxLength = Math.max(maxLength, thisLength);
        }
        variableLengthTS = (minLength != maxLength);

        if (!variableLengthTS) {
            try (MemoryWorkspace ws = workspaceMgr.notifyScopeBorrowed(ArrayType.ACTIVATIONS)) {
                return Nd4j.concat(0, inputs);
            }
        }

        outShape[2] = maxLength;
        INDArray out = workspaceMgr.create(ArrayType.ACTIVATIONS, inputs[0].dataType(), outShape);
        long numExamples = inputs[0].size(0);
        lastInputShapes = new long[inputs.length][0];
        for (int i = 0; i < inputs.length; i++) {
            out.put(new INDArrayIndex[] {NDArrayIndex.interval(i * numExamples, (i + 1) * numExamples),
                    NDArrayIndex.all(), NDArrayIndex.interval(0, inputs[i].size(2))}, inputs[i]);
            lastInputShapes[i] = inputs[i].shape();
        }
        return out;
    } else {
        try (MemoryWorkspace ws = workspaceMgr.notifyScopeBorrowed(ArrayType.ACTIVATIONS)) {
            return Nd4j.concat(0, inputs);
        }
    }
}
Example 18
Source File: NormalizerStandardizeTest.java From nd4j with Apache License 2.0
@Test
public void testBruteForce() {
    /* This test creates a dataset where feature values are multiples of consecutive natural numbers.
       The obtained values are compared to the theoretical mean and std dev. */
    double tolerancePerc = 0.01; // 0.01% of correct value
    int nSamples = 5120;
    int x = 1, y = 2, z = 3;

    INDArray featureX = Nd4j.linspace(1, nSamples, nSamples).reshape(nSamples, 1).mul(x);
    INDArray featureY = featureX.mul(y);
    INDArray featureZ = featureX.mul(z);
    INDArray featureSet = Nd4j.concat(1, featureX, featureY, featureZ);
    INDArray labelSet = Nd4j.zeros(nSamples, 1);
    DataSet sampleDataSet = new DataSet(featureSet, labelSet);

    double meanNaturalNums = (nSamples + 1) / 2.0;
    INDArray theoreticalMean = Nd4j.create(new double[] {meanNaturalNums * x, meanNaturalNums * y, meanNaturalNums * z});
    double stdNaturalNums = Math.sqrt((nSamples * nSamples - 1) / 12.0);
    INDArray theoreticalStd = Nd4j.create(new double[] {stdNaturalNums * x, stdNaturalNums * y, stdNaturalNums * z});

    NormalizerStandardize myNormalizer = new NormalizerStandardize();
    myNormalizer.fit(sampleDataSet);

    INDArray meanDelta = Transforms.abs(theoreticalMean.sub(myNormalizer.getMean()));
    INDArray meanDeltaPerc = meanDelta.div(theoreticalMean).mul(100);
    double maxMeanDeltaPerc = meanDeltaPerc.max(1).getDouble(0, 0);
    assertTrue(maxMeanDeltaPerc < tolerancePerc);

    INDArray stdDelta = Transforms.abs(theoreticalStd.sub(myNormalizer.getStd()));
    INDArray stdDeltaPerc = stdDelta.div(theoreticalStd).mul(100);
    double maxStdDeltaPerc = stdDeltaPerc.max(1).getDouble(0, 0);
    assertTrue(maxStdDeltaPerc < tolerancePerc);

    // SAME TEST WITH THE ITERATOR
    int bSize = 10;
    tolerancePerc = 0.1; // 0.1% of correct value
    DataSetIterator sampleIter = new TestDataSetIterator(sampleDataSet, bSize);
    myNormalizer.fit(sampleIter);

    meanDelta = Transforms.abs(theoreticalMean.sub(myNormalizer.getMean()));
    meanDeltaPerc = meanDelta.div(theoreticalMean).mul(100);
    maxMeanDeltaPerc = meanDeltaPerc.max(1).getDouble(0, 0);
    assertTrue(maxMeanDeltaPerc < tolerancePerc);

    stdDelta = Transforms.abs(theoreticalStd.sub(myNormalizer.getStd()));
    stdDeltaPerc = stdDelta.div(theoreticalStd).mul(100);
    maxStdDeltaPerc = stdDeltaPerc.max(1).getDouble(0, 0);
    assertTrue(maxStdDeltaPerc < tolerancePerc);
}
Example 19
Source File: SameDiffTrainingTest.java From deeplearning4j with Apache License 2.0
@Test
public void simpleClassification() {
    double learning_rate = 0.001;
    int seed = 7;
    org.nd4j.linalg.api.rng.Random rng = Nd4j.getRandom();
    rng.setSeed(seed);
    INDArray x1_label1 = Nd4j.randn(3.0, 1.0, new long[]{1000}, rng);
    INDArray x2_label1 = Nd4j.randn(2.0, 1.0, new long[]{1000}, rng);
    INDArray x1_label2 = Nd4j.randn(7.0, 1.0, new long[]{1000}, rng);
    INDArray x2_label2 = Nd4j.randn(6.0, 1.0, new long[]{1000}, rng);

    INDArray x1s = Nd4j.concat(0, x1_label1, x1_label2);
    INDArray x2s = Nd4j.concat(0, x2_label1, x2_label2);

    SameDiff sd = SameDiff.create();
    INDArray ys = Nd4j.scalar(0.0).mul(x1_label1.length()).add(Nd4j.scalar(1.0).mul(x1_label2.length()));

    SDVariable X1 = sd.placeHolder("x1", DataType.DOUBLE, 2000);
    SDVariable X2 = sd.placeHolder("x2", DataType.DOUBLE, 2000);
    SDVariable y = sd.placeHolder("y", DataType.DOUBLE);
    SDVariable w = sd.var("w", DataType.DOUBLE, 3);

    // TF code:
    //cost = tf.reduce_mean(-tf.log(y_model * Y + (1 - y_model) * (1 - Y)))
    SDVariable y_model = sd.nn.sigmoid(w.get(SDIndex.point(2)).mul(X2)
            .add(w.get(SDIndex.point(1)).mul(X1)).add(w.get(SDIndex.point(0))));
    SDVariable cost_fun = sd.math.neg(sd.math.log(y_model.mul(y)
            .add(sd.math.log(sd.constant(1.0).minus(y_model)).mul(sd.constant(1.0).minus(y)))));
    SDVariable loss = sd.mean("loss", cost_fun);

    val updater = new Sgd(learning_rate);

    sd.setLossVariables("loss");
    sd.createGradFunction();
    val conf = new TrainingConfig.Builder()
            .updater(updater)
            .minimize("loss")
            .dataSetFeatureMapping("x1", "x2", "y")
            .markLabelsUnused()
            .build();

    MultiDataSet mds = new MultiDataSet(new INDArray[]{x1s, x2s, ys}, null);

    sd.setTrainingConfig(conf);
    History history = sd.fit(new SingletonMultiDataSetIterator(mds), 1);
}
Example 20
Source File: MergeVertex.java From deeplearning4j with Apache License 2.0
@Override
public INDArray doForward(boolean training, LayerWorkspaceMgr workspaceMgr) {
    if (!canDoForward())
        throw new IllegalStateException("Cannot do forward pass: inputs not set");

    if (inputs.length == 1) {
        //No-op case
        val shape = inputs[0].shape();
        forwardPassShapes = new long[][] {Arrays.copyOf(shape, shape.length)};
        return workspaceMgr.leverageTo(ArrayType.ACTIVATIONS, inputs[0]);
    }

    INDArray[] in = new INDArray[inputs.length];
    for (int i = 0; i < in.length; i++) {
        in[i] = inputs[i].castTo(dataType); //No-op if correct type
    }

    forwardPassShapes = new long[in.length][0];
    val nExamples = in[0].size(0);
    fwdPassRank = in[0].rank();
    for (int i = 0; i < in.length; i++) {
        val currShape = in[i].shape();
        if (fwdPassRank != currShape.length) {
            throw new IllegalStateException(
                    "Cannot merge activations with different ranks: first activations have rank " + fwdPassRank
                            + ", activations[" + i + "] have rank " + currShape.length
                            + " (shape=" + Arrays.toString(currShape) + ")");
        }
        forwardPassShapes[i] = Arrays.copyOf(currShape, currShape.length);
        if (currShape[0] != nExamples) {
            throw new IllegalStateException(
                    "Cannot merge activations with different number of examples (activations[0] shape: "
                            + Arrays.toString(in[0].shape()) + ", activations[" + i + "] shape: "
                            + Arrays.toString(in[i].shape()));
        }
    }

    try (MemoryWorkspace ws = workspaceMgr.notifyScopeBorrowed(ArrayType.ACTIVATIONS)) {
        INDArray out = Nd4j.concat(mergeAxis, in);
        return out;
    }
}