Java Code Examples for org.deeplearning4j.nn.graph.ComputationGraph#setInputs()
The following examples show how to use org.deeplearning4j.nn.graph.ComputationGraph#setInputs().
You can go to the original project or source file by following the links above each example.
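Before the examples, here is a minimal sketch of the pattern they all share: assign the input and label arrays with setInputs()/setLabels(), then call computeGradientAndScore(). This sketch is not taken from any of the source files below; the graph layout, layer names, and array sizes are illustrative placeholders.

import org.deeplearning4j.nn.conf.ComputationGraphConfiguration;
import org.deeplearning4j.nn.conf.NeuralNetConfiguration;
import org.deeplearning4j.nn.conf.layers.OutputLayer;
import org.deeplearning4j.nn.graph.ComputationGraph;
import org.nd4j.linalg.activations.Activation;
import org.nd4j.linalg.api.ndarray.INDArray;
import org.nd4j.linalg.factory.Nd4j;
import org.nd4j.linalg.lossfunctions.LossFunctions;

public class SetInputsSketch {
    public static void main(String[] args) {
        //Illustrative single-input, single-output graph; sizes are placeholders
        ComputationGraphConfiguration conf = new NeuralNetConfiguration.Builder()
                .graphBuilder()
                .addInputs("in")
                .addLayer("out", new OutputLayer.Builder(LossFunctions.LossFunction.MSE)
                        .activation(Activation.IDENTITY).nIn(4).nOut(3).build(), "in")
                .setOutputs("out")
                .build();
        ComputationGraph net = new ComputationGraph(conf);
        net.init();

        //setInputs is a varargs method: one INDArray per graph input; setLabels takes one per output
        INDArray features = Nd4j.rand(5, 4);
        INDArray labels = Nd4j.rand(5, 3);
        net.setInputs(features);
        net.setLabels(labels);

        //With inputs and labels set, score and gradient can be computed without a DataSet/MultiDataSet
        net.computeGradientAndScore();
        System.out.println("score: " + net.score());
    }
}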
Example 1
Source File: WorkspaceTests.java From deeplearning4j with Apache License 2.0 | 6 votes |
@Test
public void checkScopesTestCGAS() throws Exception {
    ComputationGraph c = createNet();
    for (WorkspaceMode wm : new WorkspaceMode[]{WorkspaceMode.NONE, WorkspaceMode.ENABLED}) {
        log.info("Starting test: {}", wm);
        c.getConfiguration().setTrainingWorkspaceMode(wm);
        c.getConfiguration().setInferenceWorkspaceMode(wm);

        INDArray f = Nd4j.rand(new int[]{8, 1, 28, 28});
        INDArray l = Nd4j.rand(8, 10);
        c.setInputs(f);
        c.setLabels(l);

        c.computeGradientAndScore();
    }
}
Example 2
Source File: WorkspaceTests.java From deeplearning4j with Apache License 2.0 | 5 votes |
@Test
public void testWithPreprocessorsCG() {
    //https://github.com/deeplearning4j/deeplearning4j/issues/4347
    //Cause for the above issue was layerVertex.setInput() applying the preprocessor, with the result
    // not being detached properly from the workspace...
    for (WorkspaceMode wm : WorkspaceMode.values()) {
        System.out.println(wm);
        ComputationGraphConfiguration conf = new NeuralNetConfiguration.Builder()
                .trainingWorkspaceMode(wm)
                .inferenceWorkspaceMode(wm)
                .graphBuilder()
                .addInputs("in")
                .addLayer("e", new GravesLSTM.Builder().nIn(10).nOut(5).build(), new DupPreProcessor(), "in")
                //.addLayer("e", new GravesLSTM.Builder().nIn(10).nOut(5).build(), "in")    //Note that no preprocessor is OK
                .addLayer("rnn", new GravesLSTM.Builder().nIn(5).nOut(8).build(), "e")
                .addLayer("out", new RnnOutputLayer.Builder(LossFunctions.LossFunction.MSE)
                        .activation(Activation.SIGMOID).nOut(3).build(), "rnn")
                .setInputTypes(InputType.recurrent(10))
                .setOutputs("out")
                .build();

        ComputationGraph cg = new ComputationGraph(conf);
        cg.init();

        INDArray[] input = new INDArray[]{Nd4j.zeros(1, 10, 5)};

        for (boolean train : new boolean[]{false, true}) {
            cg.clear();
            cg.feedForward(input, train);
        }

        cg.setInputs(input);
        cg.setLabels(Nd4j.rand(new int[]{1, 3, 5}));
        cg.computeGradientAndScore();
    }
}
Example 3
Source File: TestGraphNodes.java From deeplearning4j with Apache License 2.0 | 5 votes |
@Test
public void testDuplicateToTimeSeriesVertex() {

    ComputationGraphConfiguration conf = new NeuralNetConfiguration.Builder().graphBuilder()
            .addInputs("in2d", "in3d")
            .addVertex("duplicateTS", new DuplicateToTimeSeriesVertex("in3d"), "in2d")
            .addLayer("out", new OutputLayer.Builder().nIn(1).nOut(1).activation(Activation.TANH).lossFunction(LossFunctions.LossFunction.MSE).build(), "duplicateTS")
            .addLayer("out3d", new RnnOutputLayer.Builder().nIn(1).nOut(1).activation(Activation.TANH).lossFunction(LossFunctions.LossFunction.MSE).build(), "in3d")
            .setOutputs("out", "out3d").build();

    ComputationGraph graph = new ComputationGraph(conf);
    graph.init();

    INDArray in2d = Nd4j.rand(3, 5);
    INDArray in3d = Nd4j.rand(new int[] {3, 2, 7});

    graph.setInputs(in2d, in3d);

    INDArray expOut = Nd4j.zeros(3, 5, 7);
    for (int i = 0; i < 7; i++) {
        expOut.put(new INDArrayIndex[] {NDArrayIndex.all(), NDArrayIndex.all(), NDArrayIndex.point(i)}, in2d);
    }

    GraphVertex gv = graph.getVertex("duplicateTS");
    gv.setInputs(in2d);
    INDArray outFwd = gv.doForward(true, LayerWorkspaceMgr.noWorkspaces());
    assertEquals(expOut, outFwd);

    INDArray expOutBackward = expOut.sum(2);
    gv.setEpsilon(expOut);
    INDArray outBwd = gv.doBackward(false, LayerWorkspaceMgr.noWorkspaces()).getSecond()[0];
    assertEquals(expOutBackward, outBwd);

    String json = conf.toJson();
    ComputationGraphConfiguration conf2 = ComputationGraphConfiguration.fromJson(json);
    assertEquals(conf, conf2);
}
Example 4
Source File: GradientCheckTests.java From deeplearning4j with Apache License 2.0 | 4 votes |
@Test
public void elementWiseMultiplicationLayerTest() {

    for (Activation a : new Activation[]{Activation.IDENTITY, Activation.TANH}) {

        ComputationGraphConfiguration conf = new NeuralNetConfiguration.Builder()
                .dataType(DataType.DOUBLE)
                .optimizationAlgo(OptimizationAlgorithm.CONJUGATE_GRADIENT).updater(new NoOp())
                .seed(12345L)
                .weightInit(new UniformDistribution(0, 1))
                .graphBuilder()
                .addInputs("features")
                .addLayer("dense", new DenseLayer.Builder().nIn(4).nOut(4)
                        .activation(Activation.TANH)
                        .build(), "features")
                .addLayer("elementWiseMul", new ElementWiseMultiplicationLayer.Builder().nIn(4).nOut(4)
                        .activation(a)
                        .build(), "dense")
                .addLayer("loss", new LossLayer.Builder(LossFunctions.LossFunction.COSINE_PROXIMITY)
                        .activation(Activation.IDENTITY).build(), "elementWiseMul")
                .setOutputs("loss")
                .build();

        ComputationGraph netGraph = new ComputationGraph(conf);
        netGraph.init();

        log.info("params before learning: " + netGraph.getLayer(1).paramTable());

        //Run a number of iterations of learning; manually make some pseudo data.
        //The idea is simple: since an element-wise multiplication layer is just a scaling, the cosine similarity
        // is mainly decided by the fourth value; if everything runs well, we will get a large weight for the fourth value.

        INDArray features = Nd4j.create(new double[][]{{1, 2, 3, 4}, {1, 2, 3, 1}, {1, 2, 3, 0}});
        INDArray labels = Nd4j.create(new double[][]{{1, 1, 1, 8}, {1, 1, 1, 2}, {1, 1, 1, 1}});

        netGraph.setInputs(features);
        netGraph.setLabels(labels);
        netGraph.computeGradientAndScore();
        double scoreBefore = netGraph.score();

        String msg;
        for (int epoch = 0; epoch < 5; epoch++)
            netGraph.fit(new INDArray[]{features}, new INDArray[]{labels});
        netGraph.computeGradientAndScore();
        double scoreAfter = netGraph.score();

        //Can't test in 'characteristic mode of operation' if not learning
        msg = "elementWiseMultiplicationLayerTest() - score did not (sufficiently) decrease during learning - activationFn="
                + "Id" + ", lossFn=" + "Cos-sim" + ", outputActivation=" + "Id"
                + ", doLearningFirst=" + "true" + " (before=" + scoreBefore + ", scoreAfter=" + scoreAfter + ")";
        assertTrue(msg, scoreAfter < 0.8 * scoreBefore);

        //Expectation in the linear regression case (with only an element-wise multiplication layer): large weight for the fourth value
        log.info("params after learning: " + netGraph.getLayer(1).paramTable());

        boolean gradOK = GradientCheckUtil.checkGradients(new GradientCheckUtil.GraphConfig().net(netGraph).inputs(new INDArray[]{features})
                .labels(new INDArray[]{labels}));

        msg = "elementWiseMultiplicationLayerTest() - activationFn=" + "ID" + ", lossFn=" + "Cos-sim"
                + ", outputActivation=" + "Id" + ", doLearningFirst=" + "true";
        assertTrue(msg, gradOK);

        TestUtils.testModelSerialization(netGraph);
    }
}
Example 5
Source File: TestMultiModelGradientApplication.java From deeplearning4j with Apache License 2.0 | 4 votes |
@Test
public void testGradientApplyComputationGraph() {
    int minibatch = 7;
    int nIn = 10;
    int nOut = 10;

    for (boolean regularization : new boolean[] {false, true}) {
        for (IUpdater u : new IUpdater[] {new Sgd(0.1), new Adam(0.1)}) {

            ComputationGraphConfiguration conf =
                    new NeuralNetConfiguration.Builder().seed(12345).activation(Activation.TANH)
                            .weightInit(WeightInit.XAVIER).updater(u)
                            .l1(regularization ? 0.2 : 0.0)
                            .l2(regularization ? 0.3 : 0.0).graphBuilder().addInputs("in")
                            .addLayer("0", new DenseLayer.Builder().nIn(nIn).nOut(10).build(), "in")
                            .addLayer("1", new DenseLayer.Builder().nIn(10).nOut(10).build(), "0")
                            .addLayer("2", new OutputLayer.Builder(LossFunctions.LossFunction.MCXENT)
                                    .activation(Activation.SOFTMAX).nIn(10)
                                    .nOut(nOut).build(), "1")
                            .setOutputs("2").build();

            Nd4j.getRandom().setSeed(12345);
            ComputationGraph net1GradCalc = new ComputationGraph(conf);
            net1GradCalc.init();

            Nd4j.getRandom().setSeed(12345);
            ComputationGraph net2GradUpd = new ComputationGraph(conf.clone());
            net2GradUpd.init();

            assertEquals(net1GradCalc.params(), net2GradUpd.params());

            INDArray f = Nd4j.rand(minibatch, nIn);
            INDArray l = Nd4j.create(minibatch, nOut);
            for (int i = 0; i < minibatch; i++) {
                l.putScalar(i, i % nOut, 1.0);
            }
            net1GradCalc.setInputs(f);
            net1GradCalc.setLabels(l);

            net2GradUpd.setInputs(f);
            net2GradUpd.setLabels(l);

            //Calculate gradient in first net, update and apply it in the second
            //Also: calculate gradient in the second net, just to be sure it isn't modified while doing updating on
            // the other net's gradient
            net1GradCalc.computeGradientAndScore();
            net2GradUpd.computeGradientAndScore();

            Gradient g = net1GradCalc.gradient();
            INDArray gBefore = g.gradient().dup();                              //Net 1 gradient should be modified
            INDArray net2GradBefore = net2GradUpd.gradient().gradient().dup();  //But net 2 gradient should not be
            net2GradUpd.getUpdater().update(g, 0, 0, minibatch, LayerWorkspaceMgr.noWorkspaces());
            INDArray gAfter = g.gradient().dup();
            INDArray net2GradAfter = net2GradUpd.gradient().gradient().dup();

            assertNotEquals(gBefore, gAfter);                   //Net 1 gradient should be modified
            assertEquals(net2GradBefore, net2GradAfter);        //But net 2 gradient should not be

            //Also: if we apply the gradient using a subi op, we should get the same final params as if we did a fit op
            // on the original network
            net2GradUpd.params().subi(g.gradient());

            net1GradCalc.fit(new INDArray[] {f}, new INDArray[] {l});
            assertEquals(net1GradCalc.params(), net2GradUpd.params());

            //=============================
            if (!(u instanceof Sgd)) {
                net2GradUpd.getUpdater().getStateViewArray().assign(net1GradCalc.getUpdater().getStateViewArray());
            }
            assertEquals(net1GradCalc.params(), net2GradUpd.params());
            assertEquals(net1GradCalc.getUpdater().getStateViewArray(), net2GradUpd.getUpdater().getStateViewArray());

            //Remove the next 2 lines: fails - as net 1 is 1 iteration ahead
            net1GradCalc.getConfiguration().setIterationCount(0);
            net2GradUpd.getConfiguration().setIterationCount(0);

            for (int i = 0; i < 100; i++) {
                net1GradCalc.fit(new INDArray[] {f}, new INDArray[] {l});
                net2GradUpd.fit(new INDArray[] {f}, new INDArray[] {l});
                assertEquals(net1GradCalc.params(), net2GradUpd.params());
            }
        }
    }
}
Example 6
Source File: TestNetConversion.java From deeplearning4j with Apache License 2.0 | 4 votes |
@Test
public void testMlnToCompGraph() {
    Nd4j.getRandom().setSeed(12345);

    for (int i = 0; i < 3; i++) {
        MultiLayerNetwork n;
        switch (i) {
            case 0:
                n = getNet1(false);
                break;
            case 1:
                n = getNet1(true);
                break;
            case 2:
                n = getNet2();
                break;
            default:
                throw new RuntimeException();
        }
        INDArray in = (i <= 1 ? Nd4j.rand(new int[]{8, 3, 10, 10}) : Nd4j.rand(new int[]{8, 5, 10}));
        INDArray labels = (i <= 1 ? Nd4j.rand(new int[]{8, 10}) : Nd4j.rand(new int[]{8, 10, 10}));

        ComputationGraph cg = n.toComputationGraph();

        INDArray out1 = n.output(in);
        INDArray out2 = cg.outputSingle(in);
        assertEquals(out1, out2);

        n.setInput(in);
        n.setLabels(labels);

        cg.setInputs(in);
        cg.setLabels(labels);

        n.computeGradientAndScore();
        cg.computeGradientAndScore();

        assertEquals(n.score(), cg.score(), 1e-6);

        assertEquals(n.gradient().gradient(), cg.gradient().gradient());

        n.fit(in, labels);
        cg.fit(new INDArray[]{in}, new INDArray[]{labels});

        assertEquals(n.params(), cg.params());
    }
}
Example 7
Source File: LocallyConnectedLayerTest.java From deeplearning4j with Apache License 2.0 | 4 votes |
@Test
public void testLocallyConnected() {
    for (DataType globalDtype : new DataType[]{DataType.DOUBLE, DataType.FLOAT, DataType.HALF}) {
        Nd4j.setDefaultDataTypes(globalDtype, globalDtype);
        for (DataType networkDtype : new DataType[]{DataType.DOUBLE, DataType.FLOAT, DataType.HALF}) {
            assertEquals(globalDtype, Nd4j.dataType());
            assertEquals(globalDtype, Nd4j.defaultFloatingPointType());

            for (int test = 0; test < 2; test++) {
                String msg = "Global dtype: " + globalDtype + ", network dtype: " + networkDtype + ", test=" + test;

                ComputationGraphConfiguration.GraphBuilder b = new NeuralNetConfiguration.Builder()
                        .dataType(networkDtype)
                        .seed(123)
                        .updater(new NoOp())
                        .weightInit(WeightInit.XAVIER)
                        .convolutionMode(ConvolutionMode.Same)
                        .graphBuilder();

                INDArray[] in;
                INDArray label;
                switch (test) {
                    case 0:
                        b.addInputs("in")
                                .addLayer("1", new LSTM.Builder().nOut(5).build(), "in")
                                .addLayer("2", new LocallyConnected1D.Builder().kernelSize(2).nOut(4).build(), "1")
                                .addLayer("out", new RnnOutputLayer.Builder().nOut(10).build(), "2")
                                .setOutputs("out")
                                .setInputTypes(InputType.recurrent(5, 4));
                        in = new INDArray[]{Nd4j.rand(networkDtype, 2, 5, 4)};
                        label = TestUtils.randomOneHotTimeSeries(2, 10, 4).castTo(networkDtype);
                        break;
                    case 1:
                        b.addInputs("in")
                                .addLayer("1", new ConvolutionLayer.Builder().kernelSize(2, 2).nOut(5).convolutionMode(ConvolutionMode.Same).build(), "in")
                                .addLayer("2", new LocallyConnected2D.Builder().kernelSize(2, 2).nOut(5).build(), "1")
                                .addLayer("out", new OutputLayer.Builder().nOut(10).build(), "2")
                                .setOutputs("out")
                                //.setInputTypes(InputType.convolutional(28, 28, 1));
                                //in = new INDArray[]{Nd4j.rand(networkDtype, 2, 1, 28, 28)};
                                .setInputTypes(InputType.convolutional(8, 8, 1));
                        in = new INDArray[]{Nd4j.rand(networkDtype, 2, 1, 8, 8)};
                        label = TestUtils.randomOneHot(2, 10).castTo(networkDtype);
                        break;
                    default:
                        throw new RuntimeException();
                }

                ComputationGraph net = new ComputationGraph(b.build());
                net.init();

                INDArray out = net.outputSingle(in);
                assertEquals(msg, networkDtype, out.dataType());

                Map<String, INDArray> ff = net.feedForward(in, false);
                for (Map.Entry<String, INDArray> e : ff.entrySet()) {
                    if (e.getKey().equals("in"))
                        continue;
                    String s = msg + " - layer: " + e.getKey();
                    assertEquals(s, networkDtype, e.getValue().dataType());
                }

                net.setInputs(in);
                net.setLabels(label);
                net.computeGradientAndScore();

                net.fit(new MultiDataSet(in, new INDArray[]{label}));
            }
        }
    }
}
Example 8
Source File: DTypeTests.java From deeplearning4j with Apache License 2.0 | 4 votes |
@Test
public void testLocallyConnected() {
    for (DataType globalDtype : new DataType[]{DataType.DOUBLE, DataType.FLOAT, DataType.HALF}) {
        Nd4j.setDefaultDataTypes(globalDtype, globalDtype);
        for (DataType networkDtype : new DataType[]{DataType.DOUBLE, DataType.FLOAT, DataType.HALF}) {
            assertEquals(globalDtype, Nd4j.dataType());
            assertEquals(globalDtype, Nd4j.defaultFloatingPointType());

            INDArray[] in = null;
            for (int test = 0; test < 2; test++) {
                String msg = "Global dtype: " + globalDtype + ", network dtype: " + networkDtype + ", test=" + test;

                ComputationGraphConfiguration.GraphBuilder b = new NeuralNetConfiguration.Builder()
                        .dataType(networkDtype)
                        .seed(123)
                        .updater(new NoOp())
                        .weightInit(WeightInit.XAVIER)
                        .convolutionMode(ConvolutionMode.Same)
                        .graphBuilder();

                INDArray label;
                switch (test) {
                    case 0:
                        b.addInputs("in")
                                .addLayer("1", new LSTM.Builder().nOut(5).build(), "in")
                                .addLayer("2", new LocallyConnected1D.Builder().kernelSize(2).nOut(4).build(), "1")
                                .addLayer("out", new RnnOutputLayer.Builder().nOut(10).build(), "2")
                                .setOutputs("out")
                                .setInputTypes(InputType.recurrent(5, 2));
                        in = new INDArray[]{Nd4j.rand(networkDtype, 2, 5, 2)};
                        label = TestUtils.randomOneHotTimeSeries(2, 10, 2);
                        break;
                    case 1:
                        b.addInputs("in")
                                .addLayer("1", new ConvolutionLayer.Builder().kernelSize(2, 2).nOut(5).convolutionMode(ConvolutionMode.Same).build(), "in")
                                .addLayer("2", new LocallyConnected2D.Builder().kernelSize(2, 2).nOut(5).build(), "1")
                                .addLayer("out", new OutputLayer.Builder().nOut(10).build(), "2")
                                .setOutputs("out")
                                .setInputTypes(InputType.convolutional(8, 8, 1));
                        in = new INDArray[]{Nd4j.rand(networkDtype, 2, 1, 8, 8)};
                        label = TestUtils.randomOneHot(2, 10).castTo(networkDtype);
                        break;
                    default:
                        throw new RuntimeException();
                }

                ComputationGraph net = new ComputationGraph(b.build());
                net.init();

                INDArray out = net.outputSingle(in);
                assertEquals(msg, networkDtype, out.dataType());

                Map<String, INDArray> ff = net.feedForward(in, false);
                for (Map.Entry<String, INDArray> e : ff.entrySet()) {
                    if (e.getKey().equals("in"))
                        continue;
                    String s = msg + " - layer: " + e.getKey();
                    assertEquals(s, networkDtype, e.getValue().dataType());
                }

                net.setInputs(in);
                net.setLabels(label);
                net.computeGradientAndScore();

                net.fit(new MultiDataSet(in, new INDArray[]{label}));

                logUsedClasses(net);

                //Now, test mismatched dtypes for input/labels:
                for (DataType inputLabelDtype : new DataType[]{DataType.DOUBLE, DataType.FLOAT, DataType.HALF}) {
                    INDArray[] in2 = new INDArray[in.length];
                    for (int i = 0; i < in.length; i++) {
                        in2[i] = in[i].castTo(inputLabelDtype);
                    }
                    INDArray label2 = label.castTo(inputLabelDtype);

                    net.output(in2);
                    net.setInputs(in2);
                    net.setLabels(label2);
                    net.computeGradientAndScore();

                    net.fit(new MultiDataSet(in2, new INDArray[]{label2}));
                }
            }
        }
    }
}