Java Code Examples for org.deeplearning4j.nn.multilayer.MultiLayerNetwork#toComputationGraph()
The following examples show how to use org.deeplearning4j.nn.multilayer.MultiLayerNetwork#toComputationGraph().
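Before the full test-suite examples, here is a minimal sketch of the conversion pattern they all share. The configuration mirrors Example 1 below; the layer sizes, the input shape, and the output-equality check at the end are illustrative assumptions rather than code taken from any single example.

@Test
public void minimalConversionSketch() {
    // A small two-layer network; sizes are illustrative only.
    MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder()
            .activation(Activation.TANH)
            .seed(12345)
            .list()
            .layer(new DenseLayer.Builder().nIn(10).nOut(10).build())
            .layer(new OutputLayer.Builder().nIn(10).nOut(3).lossFunction(LossFunctions.LossFunction.MSE).build())
            .build();

    MultiLayerNetwork net = new MultiLayerNetwork(conf);
    net.init();

    // Convert the layer-stack model into the more general graph representation.
    // In the resulting graph the layers are addressed by name ("0", "1", ...).
    ComputationGraph cg = net.toComputationGraph();

    // The converted graph is expected to produce the same output as the original network.
    INDArray in = Nd4j.rand(new int[]{5, 10});
    assertEquals(net.output(in), cg.outputSingle(in));
}

The examples below exercise exactly this pattern in more involved settings: learning-rate changes, gradient checks, unsupervised pretraining, and listener behaviour before and after conversion.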
Example 1
Source File: TestLrChanges.java, from deeplearning4j (Apache License 2.0)
@Test
public void testChangeLSGD() {
    //Simple test for no updater nets
    MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder()
            .activation(Activation.TANH)
            .seed(12345)
            .updater(new Sgd(0.1))
            .list()
            .layer(new DenseLayer.Builder().nIn(10).nOut(10).build())
            .layer(new DenseLayer.Builder().nIn(10).nOut(10).build())
            .layer(new OutputLayer.Builder().nIn(10).nOut(10).lossFunction(LossFunctions.LossFunction.MSE).build())
            .build();

    MultiLayerNetwork net = new MultiLayerNetwork(conf);
    net.init();

    net.setLearningRate(1.0);
    net.setLearningRate(1, 0.5);
    assertEquals(1.0, net.getLearningRate(0), 0.0);
    assertEquals(0.5, net.getLearningRate(1), 0.0);

    ComputationGraph cg = net.toComputationGraph();
    cg.setLearningRate(2.0);
    cg.setLearningRate("1", 2.5);
    assertEquals(2.0, cg.getLearningRate("0"), 0.0);
    assertEquals(2.5, cg.getLearningRate("1"), 0.0);
}
Example 2
Source File: CNNGradientCheckTest.java, from deeplearning4j (Apache License 2.0)
@Test
public void testCnnWithSpaceToBatch() {
    Nd4j.getRandom().setSeed(12345);

    int nOut = 4;

    int[] minibatchSizes = {2, 4};
    int width = 5;
    int height = 5;
    int inputDepth = 1;

    int[] kernel = {2, 2};
    int[] blocks = {2, 2};

    String[] activations = {"sigmoid", "tanh"};
    SubsamplingLayer.PoolingType[] poolingTypes =
            new SubsamplingLayer.PoolingType[]{SubsamplingLayer.PoolingType.MAX,
                    SubsamplingLayer.PoolingType.AVG, SubsamplingLayer.PoolingType.PNORM};

    boolean nchw = format == CNN2DFormat.NCHW;

    for (String afn : activations) {
        for (SubsamplingLayer.PoolingType poolingType : poolingTypes) {
            for (int minibatchSize : minibatchSizes) {
                long[] inShape = nchw ? new long[]{minibatchSize, inputDepth, height, width}
                        : new long[]{minibatchSize, height, width, inputDepth};
                INDArray input = Nd4j.rand(DataType.DOUBLE, inShape);
                INDArray labels = Nd4j.zeros(4 * minibatchSize, nOut);
                for (int i = 0; i < 4 * minibatchSize; i++) {
                    labels.putScalar(new int[]{i, i % nOut}, 1.0);
                }

                MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder()
                        .dataType(DataType.DOUBLE)
                        .updater(new NoOp()).weightInit(new NormalDistribution(0, 1))
                        .list()
                        .layer(new ConvolutionLayer.Builder(kernel).nIn(inputDepth).nOut(3).build())
                        .layer(new SpaceToBatchLayer.Builder(blocks).build()) //trivial space to batch
                        .layer(new OutputLayer.Builder(LossFunctions.LossFunction.MCXENT)
                                .activation(Activation.SOFTMAX)
                                .nOut(nOut).build())
                        .setInputType(InputType.convolutional(height, width, inputDepth, format))
                        .build();

                MultiLayerNetwork net = new MultiLayerNetwork(conf);
                net.init();

                String msg = format + " - poolingType=" + poolingType + ", minibatch=" + minibatchSize
                        + ", activationFn=" + afn;

                if (PRINT_RESULTS) {
                    System.out.println(msg);
//                    for (int j = 0; j < net.getnLayers(); j++)
//                        System.out.println("Layer " + j + " # params: " + net.getLayer(j).numParams());
                }

                boolean gradOK = GradientCheckUtil.checkGradients(net, DEFAULT_EPS, DEFAULT_MAX_REL_ERROR,
                        DEFAULT_MIN_ABS_ERROR, PRINT_RESULTS, RETURN_ON_FIRST_FAILURE, input, labels);

                assertTrue(msg, gradOK);

                //Also check compgraph:
                ComputationGraph cg = net.toComputationGraph();
                gradOK = GradientCheckUtil.checkGradients(new GradientCheckUtil.GraphConfig().net(cg)
                        .inputs(new INDArray[]{input})
                        .labels(new INDArray[]{labels}));
                assertTrue(msg + " - compgraph", gradOK);

                TestUtils.testModelSerialization(net);
            }
        }
    }
}
Example 3
Source File: UtilLayerGradientChecks.java, from deeplearning4j (Apache License 2.0)
@Test
public void testFrozenWithBackprop() {
    for (int minibatch : new int[]{1, 5}) {

        MultiLayerConfiguration conf2 = new NeuralNetConfiguration.Builder()
                .dataType(DataType.DOUBLE)
                .seed(12345)
                .updater(Updater.NONE)
                .list()
                .layer(new DenseLayer.Builder().nIn(10).nOut(10)
                        .activation(Activation.TANH).weightInit(WeightInit.XAVIER).build())
                .layer(new FrozenLayerWithBackprop(new DenseLayer.Builder().nIn(10).nOut(10)
                        .activation(Activation.TANH).weightInit(WeightInit.XAVIER).build()))
                .layer(new FrozenLayerWithBackprop(
                        new DenseLayer.Builder().nIn(10).nOut(10).activation(Activation.TANH)
                                .weightInit(WeightInit.XAVIER).build()))
                .layer(new OutputLayer.Builder(LossFunctions.LossFunction.MCXENT)
                        .activation(Activation.SOFTMAX).nIn(10).nOut(10).build())
                .build();

        MultiLayerNetwork net = new MultiLayerNetwork(conf2);
        net.init();

        INDArray in = Nd4j.rand(minibatch, 10);
        INDArray labels = TestUtils.randomOneHot(minibatch, 10);

        Set<String> excludeParams = new HashSet<>();
        excludeParams.addAll(Arrays.asList("1_W", "1_b", "2_W", "2_b"));

        boolean gradOK = GradientCheckUtil.checkGradients(new GradientCheckUtil.MLNConfig().net(net).input(in)
                .labels(labels).excludeParams(excludeParams));
        assertTrue(gradOK);

        TestUtils.testModelSerialization(net);

        //Test ComputationGraph equivalent:
        ComputationGraph g = net.toComputationGraph();
        boolean gradOKCG = GradientCheckUtil.checkGradients(new GradientCheckUtil.GraphConfig().net(g)
                .minAbsoluteError(1e-6)
                .inputs(new INDArray[]{in}).labels(new INDArray[]{labels}).excludeParams(excludeParams));
        assertTrue(gradOKCG);

        TestUtils.testModelSerialization(g);
    }
}
Example 4
Source File: TestNetConversion.java, from deeplearning4j (Apache License 2.0)
@Test
public void testMlnToCompGraph() {
    Nd4j.getRandom().setSeed(12345);

    for (int i = 0; i < 3; i++) {
        MultiLayerNetwork n;
        switch (i) {
            case 0:
                n = getNet1(false);
                break;
            case 1:
                n = getNet1(true);
                break;
            case 2:
                n = getNet2();
                break;
            default:
                throw new RuntimeException();
        }
        INDArray in = (i <= 1 ? Nd4j.rand(new int[]{8, 3, 10, 10}) : Nd4j.rand(new int[]{8, 5, 10}));
        INDArray labels = (i <= 1 ? Nd4j.rand(new int[]{8, 10}) : Nd4j.rand(new int[]{8, 10, 10}));

        ComputationGraph cg = n.toComputationGraph();

        INDArray out1 = n.output(in);
        INDArray out2 = cg.outputSingle(in);
        assertEquals(out1, out2);

        n.setInput(in);
        n.setLabels(labels);

        cg.setInputs(in);
        cg.setLabels(labels);

        n.computeGradientAndScore();
        cg.computeGradientAndScore();

        assertEquals(n.score(), cg.score(), 1e-6);

        assertEquals(n.gradient().gradient(), cg.gradient().gradient());

        n.fit(in, labels);
        cg.fit(new INDArray[]{in}, new INDArray[]{labels});

        assertEquals(n.params(), cg.params());
    }
}
Example 5
Source File: TestCompGraphUnsupervised.java, from deeplearning4j (Apache License 2.0)
@Test
public void compareImplementations() throws Exception {

    for (WorkspaceMode wsm : new WorkspaceMode[]{WorkspaceMode.NONE, WorkspaceMode.ENABLED}) {

        MultiLayerConfiguration conf2 = new NeuralNetConfiguration.Builder()
                .seed(12345)
                .updater(new Adam(1e-3))
                .weightInit(WeightInit.XAVIER)
                .inferenceWorkspaceMode(wsm)
                .trainingWorkspaceMode(wsm)
                .list()
                .layer(new VariationalAutoencoder.Builder()
                        .nIn(784)
                        .nOut(32)
                        .encoderLayerSizes(16)
                        .decoderLayerSizes(16)
                        .activation(Activation.TANH)
                        .pzxActivationFunction(Activation.SIGMOID)
                        .reconstructionDistribution(new BernoulliReconstructionDistribution(Activation.SIGMOID))
                        .build())
                .layer(new VariationalAutoencoder.Builder()
                        .nIn(32)
                        .nOut(8)
                        .encoderLayerSizes(16)
                        .decoderLayerSizes(16)
                        .activation(Activation.TANH)
                        .pzxActivationFunction(Activation.SIGMOID)
                        .reconstructionDistribution(new GaussianReconstructionDistribution(Activation.TANH))
                        .build())
                .build();

        MultiLayerNetwork net = new MultiLayerNetwork(conf2);
        net.init();

        ComputationGraph cg = net.toComputationGraph();
        cg.getConfiguration().setInferenceWorkspaceMode(wsm);
        cg.getConfiguration().setTrainingWorkspaceMode(wsm);

        DataSetIterator ds = new EarlyTerminationDataSetIterator(new MnistDataSetIterator(1, true, 12345), 1);
        Nd4j.getRandom().setSeed(12345);
        net.pretrainLayer(0, ds);

        ds = new EarlyTerminationDataSetIterator(new MnistDataSetIterator(1, true, 12345), 1);
        Nd4j.getRandom().setSeed(12345);
        cg.pretrainLayer("0", ds);

        assertEquals(net.params(), cg.params());
    }
}
Example 6
Source File: TestListeners.java, from deeplearning4j (Apache License 2.0)
@Test
public void testListenerCalls() {
    MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder()
            .list()
            .layer(new OutputLayer.Builder().nIn(4).nOut(3).activation(Activation.SOFTMAX)
                    .lossFunction(LossFunctions.LossFunction.MCXENT).build())
            .build();

    MultiLayerNetwork net = new MultiLayerNetwork(conf);
    net.init();

    TestListener tl = new TestListener();
    net.setListeners(tl);

    DataSetIterator irisIter = new IrisDataSetIterator(50, 150);

    net.fit(irisIter, 2);

    List<Triple<Call, Integer, Integer>> exp = new ArrayList<>();
    exp.add(new Triple<>(Call.EPOCH_START, 0, 0));
    exp.add(new Triple<>(Call.ON_FWD, 0, 0));
    exp.add(new Triple<>(Call.ON_BWD, 0, 0));
    exp.add(new Triple<>(Call.ON_GRAD, 0, 0));
    exp.add(new Triple<>(Call.ITER_DONE, 0, 0));
    exp.add(new Triple<>(Call.ON_FWD, 1, 0));
    exp.add(new Triple<>(Call.ON_BWD, 1, 0));
    exp.add(new Triple<>(Call.ON_GRAD, 1, 0));
    exp.add(new Triple<>(Call.ITER_DONE, 1, 0));
    exp.add(new Triple<>(Call.ON_FWD, 2, 0));
    exp.add(new Triple<>(Call.ON_BWD, 2, 0));
    exp.add(new Triple<>(Call.ON_GRAD, 2, 0));
    exp.add(new Triple<>(Call.ITER_DONE, 2, 0));
    exp.add(new Triple<>(Call.EPOCH_END, 3, 0));    //Post updating iter count, pre update epoch count

    exp.add(new Triple<>(Call.EPOCH_START, 3, 1));
    exp.add(new Triple<>(Call.ON_FWD, 3, 1));
    exp.add(new Triple<>(Call.ON_BWD, 3, 1));
    exp.add(new Triple<>(Call.ON_GRAD, 3, 1));
    exp.add(new Triple<>(Call.ITER_DONE, 3, 1));
    exp.add(new Triple<>(Call.ON_FWD, 4, 1));
    exp.add(new Triple<>(Call.ON_BWD, 4, 1));
    exp.add(new Triple<>(Call.ON_GRAD, 4, 1));
    exp.add(new Triple<>(Call.ITER_DONE, 4, 1));
    exp.add(new Triple<>(Call.ON_FWD, 5, 1));
    exp.add(new Triple<>(Call.ON_BWD, 5, 1));
    exp.add(new Triple<>(Call.ON_GRAD, 5, 1));
    exp.add(new Triple<>(Call.ITER_DONE, 5, 1));
    exp.add(new Triple<>(Call.EPOCH_END, 6, 1));

    assertEquals(exp, tl.getCalls());

    tl = new TestListener();

    ComputationGraph cg = net.toComputationGraph();
    cg.setListeners(tl);

    cg.fit(irisIter, 2);

    assertEquals(exp, tl.getCalls());
}
Example 7
Source File: TestConvolutionalListener.java, from deeplearning4j (Apache License 2.0)
@Test
@Ignore //Should be run manually
public void testUI() throws Exception {

    int nChannels = 1; // Number of input channels
    int outputNum = 10; // The number of possible outcomes
    int batchSize = 64; // Test batch size

    DataSetIterator mnistTrain = new MnistDataSetIterator(batchSize, true, 12345);

    MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder().seed(12345) // Training iterations as above
            .l2(0.0005).weightInit(WeightInit.XAVIER)
            .updater(new Nesterovs(0.01, 0.9)).list()
            .layer(0, new ConvolutionLayer.Builder(5, 5)
                    //nIn and nOut specify depth. nIn here is the nChannels and nOut is the number of filters to be applied
                    .nIn(nChannels).stride(1, 1).nOut(20).activation(Activation.IDENTITY).build())
            .layer(1, new SubsamplingLayer.Builder(SubsamplingLayer.PoolingType.MAX).kernelSize(2, 2)
                    .stride(2, 2).build())
            .layer(2, new ConvolutionLayer.Builder(5, 5)
                    //Note that nIn need not be specified in later layers
                    .stride(1, 1).nOut(50).activation(Activation.IDENTITY).build())
            .layer(3, new SubsamplingLayer.Builder(SubsamplingLayer.PoolingType.MAX).kernelSize(2, 2)
                    .stride(2, 2).build())
            .layer(4, new DenseLayer.Builder().activation(Activation.RELU).nOut(500).build())
            .layer(5, new OutputLayer.Builder(LossFunctions.LossFunction.NEGATIVELOGLIKELIHOOD)
                    .nOut(outputNum).activation(Activation.SOFTMAX).build())
            .setInputType(InputType.convolutionalFlat(28, 28, 1)) //See note below
            .build();

    MultiLayerNetwork net = new MultiLayerNetwork(conf);
    net.init();
    net.setListeners(new ConvolutionalIterationListener(1), new ScoreIterationListener(1));

    for (int i = 0; i < 10; i++) {
        net.fit(mnistTrain.next());
        Thread.sleep(1000);
    }

    ComputationGraph cg = net.toComputationGraph();
    cg.setListeners(new ConvolutionalIterationListener(1), new ScoreIterationListener(1));
    for (int i = 0; i < 10; i++) {
        cg.fit(mnistTrain.next());
        Thread.sleep(1000);
    }

    Thread.sleep(100000);
}