org.nd4j.linalg.learning.config.NoOp Java Examples

The following examples show how to use org.nd4j.linalg.learning.config.NoOp. All of the examples are taken from the deeplearning4j project (Apache License 2.0); the source file for each example is noted in its heading.
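As a quick orientation before the examples: NoOp is an updater that leaves parameters unchanged during the update step, which is why it appears throughout the gradient-check tests below, where analytic gradients must not be altered by a learning rule. The minimal sketch below is not taken from any of the files that follow; the class name, layer sizes, and loss function are illustrative, and only the builder calls already used in the examples are relied on.

import org.deeplearning4j.nn.conf.MultiLayerConfiguration;
import org.deeplearning4j.nn.conf.NeuralNetConfiguration;
import org.deeplearning4j.nn.conf.layers.OutputLayer;
import org.deeplearning4j.nn.multilayer.MultiLayerNetwork;
import org.nd4j.linalg.activations.Activation;
import org.nd4j.linalg.learning.config.NoOp;
import org.nd4j.linalg.lossfunctions.LossFunctions;

public class NoOpUpdaterSketch {
    public static void main(String[] args) {
        MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder()
                .updater(new NoOp())   // no learning rule: parameters keep their current values after fit()
                .list()
                .layer(new OutputLayer.Builder(LossFunctions.LossFunction.MSE)
                        .nIn(4).nOut(3).activation(Activation.IDENTITY).build())
                .build();

        MultiLayerNetwork net = new MultiLayerNetwork(conf);
        net.init();
        // With NoOp, calling net.fit(...) computes gradients but applies no parameter updates.
    }
}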
Example #1
Source File: TestSimpleRnn.java    From deeplearning4j with Apache License 2.0
@Test
public void testBiasInit(){
    Nd4j.getRandom().setSeed(12345);
    int nIn = 5;
    int layerSize = 6;

    MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder()
            .updater(new NoOp())
            .weightInit(WeightInit.XAVIER)
            .activation(Activation.TANH)
            .list()
            .layer(new SimpleRnn.Builder().nIn(nIn).nOut(layerSize).dataFormat(rnnDataFormat)
                    .biasInit(100)
                    .build())
            .build();

    MultiLayerNetwork net = new MultiLayerNetwork(conf);
    net.init();

    INDArray bArr = net.getParam("0_b");
    assertEquals(Nd4j.valueArrayOf(new long[]{1,layerSize}, 100.0f), bArr);
}
 
Example #2
Source File: GradientCheckTestsComputationGraph.java    From deeplearning4j with Apache License 2.0
@Test
    public void testLSTMWithSubset() {
        Nd4j.getRandom().setSeed(1234);
        ComputationGraphConfiguration conf = new NeuralNetConfiguration.Builder().seed(1234)
                        .dataType(DataType.DOUBLE)
                        .weightInit(new NormalDistribution(0, 1))
                        .updater(new NoOp()).graphBuilder().addInputs("input").setOutputs("out")
                        .addLayer("lstm1", new LSTM.Builder().nIn(3).nOut(6).activation(Activation.TANH).build(),
                                        "input")
                        .addVertex("subset", new SubsetVertex(0, 2), "lstm1")
                        .addLayer("out", new RnnOutputLayer.Builder().nIn(3).nOut(2).activation(Activation.SOFTMAX)
                                        .lossFunction(LossFunctions.LossFunction.MCXENT).build(), "subset")
                        .build();

        ComputationGraph graph = new ComputationGraph(conf);
        graph.init();

        Random r = new Random(12345);
        INDArray input = Nd4j.rand(new int[] {2, 3, 4});
        INDArray labels = TestUtils.randomOneHotTimeSeries(2, 2, 4);

        if (PRINT_RESULTS) {
            System.out.println("testLSTMWithSubset()");
//            for (int j = 0; j < graph.getNumLayers(); j++)
//                System.out.println("Layer " + j + " # params: " + graph.getLayer(j).numParams());
        }

        boolean gradOK = GradientCheckUtil.checkGradients(new GradientCheckUtil.GraphConfig().net(graph).inputs(new INDArray[]{input})
                .labels(new INDArray[]{labels}));

        String msg = "testLSTMWithSubset()";
        assertTrue(msg, gradOK);
        TestUtils.testModelSerialization(graph);
    }
 
Example #3
Source File: GradientCheckTestsComputationGraph.java    From deeplearning4j with Apache License 2.0
@Test
    public void testMultipleOutputsLayer() {
        Nd4j.getRandom().setSeed(12345);
        ComputationGraphConfiguration conf = new NeuralNetConfiguration.Builder().seed(12345)
                        .dataType(DataType.DOUBLE)
                        .optimizationAlgo(OptimizationAlgorithm.STOCHASTIC_GRADIENT_DESCENT)
                        .dist(new NormalDistribution(0, 1))
                        .updater(new NoOp()).activation(Activation.TANH).graphBuilder().addInputs("i0")
                        .addLayer("d0", new DenseLayer.Builder().nIn(2).nOut(2).build(), "i0")
                        .addLayer("d1", new DenseLayer.Builder().nIn(2).nOut(2).build(), "d0")
                        .addLayer("d2", new DenseLayer.Builder().nIn(2).nOut(2).build(), "d0")
                        .addLayer("d3", new DenseLayer.Builder().nIn(2).nOut(2).build(), "d0")
                        .addLayer("out", new OutputLayer.Builder().lossFunction(LossFunctions.LossFunction.MSE).nIn(6)
                                        .nOut(2).build(), "d1", "d2", "d3")
                        .setOutputs("out").build();

        ComputationGraph graph = new ComputationGraph(conf);
        graph.init();

        int[] minibatchSizes = {1, 3};
        for (int mb : minibatchSizes) {
            INDArray input = Nd4j.rand(mb, 2);
            INDArray out = Nd4j.rand(mb, 2);


            String msg = "testMultipleOutputsLayer() - minibatchSize = " + mb;
            if (PRINT_RESULTS) {
                System.out.println(msg);
//                for (int j = 0; j < graph.getNumLayers(); j++)
//                    System.out.println("Layer " + j + " # params: " + graph.getLayer(j).numParams());
            }

            boolean gradOK = GradientCheckUtil.checkGradients(new GradientCheckUtil.GraphConfig().net(graph).inputs(new INDArray[]{input})
                    .labels(new INDArray[]{out}));

            assertTrue(msg, gradOK);
            TestUtils.testModelSerialization(graph);
        }
    }
 
Example #4
Source File: BatchNormalization.java    From deeplearning4j with Apache License 2.0
@Override
public IUpdater getUpdaterByParam(String paramName) {
    switch (paramName) {
        case BatchNormalizationParamInitializer.BETA:
        case BatchNormalizationParamInitializer.GAMMA:
            return iUpdater;
        case BatchNormalizationParamInitializer.GLOBAL_MEAN:
        case BatchNormalizationParamInitializer.GLOBAL_VAR:
        case BatchNormalizationParamInitializer.GLOBAL_LOG_STD:
            return new NoOp();
        default:
            throw new IllegalArgumentException("Unknown parameter: \"" + paramName + "\"");
    }
}
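The override above means only the learnable parameters (gamma and beta) are trained; the running statistics are assigned a NoOp updater so the optimizer never touches them. The following is a hedged usage sketch, not part of the original file: the Adam updater, the nOut value, and the class name are arbitrary illustration choices, and the import paths are assumed from the usual DL4J package layout.

import org.deeplearning4j.nn.conf.layers.BatchNormalization;
import org.deeplearning4j.nn.params.BatchNormalizationParamInitializer;
import org.nd4j.linalg.learning.config.Adam;
import org.nd4j.linalg.learning.config.IUpdater;

public class BatchNormUpdaterSketch {
    public static void main(String[] args) {
        BatchNormalization bn = new BatchNormalization.Builder()
                .nOut(5)
                .updater(new Adam(1e-3))  // updater intended for the learnable gamma/beta parameters
                .build();

        // Learnable parameters resolve to the configured updater...
        IUpdater gammaUpdater = bn.getUpdaterByParam(BatchNormalizationParamInitializer.GAMMA);
        // ...while the running statistics resolve to NoOp: they are maintained by the
        // running mean/variance calculation, not by gradient descent.
        IUpdater meanUpdater = bn.getUpdaterByParam(BatchNormalizationParamInitializer.GLOBAL_MEAN);

        System.out.println(gammaUpdater.getClass().getSimpleName()); // Adam
        System.out.println(meanUpdater.getClass().getSimpleName());  // NoOp
    }
}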
 
Example #5
Source File: GradientCheckTests.java    From deeplearning4j with Apache License 2.0
@Test
    public void testEmbeddingLayerPreluSimple() {
        Random r = new Random(12345);
        int nExamples = 5;
        INDArray input = Nd4j.zeros(nExamples, 1);
        INDArray labels = Nd4j.zeros(nExamples, 3);
        for (int i = 0; i < nExamples; i++) {
            input.putScalar(i, r.nextInt(4));
            labels.putScalar(new int[] {i, r.nextInt(3)}, 1.0);
        }

        MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder().l2(0.2).l1(0.1)
                .dataType(DataType.DOUBLE)
                .optimizationAlgo(OptimizationAlgorithm.STOCHASTIC_GRADIENT_DESCENT).seed(12345L)
                .list().layer(new EmbeddingLayer.Builder().nIn(4).nOut(3).weightInit(WeightInit.XAVIER)
                                .updater(new NoOp()).build())
                .layer(new PReLULayer.Builder().inputShape(3).sharedAxes(1).updater(new NoOp()).build())
                .layer(new OutputLayer.Builder(LossFunction.MCXENT).nIn(3).nOut(3)
                        .weightInit(WeightInit.XAVIER).dist(new NormalDistribution(0, 1))
                        .updater(new NoOp()).activation(Activation.SOFTMAX).build())
                .build();

        MultiLayerNetwork mln = new MultiLayerNetwork(conf);
        mln.init();

        if (PRINT_RESULTS) {
            System.out.println("testEmbeddingLayerSimple");
//            for (int j = 0; j < mln.getnLayers(); j++)
//                System.out.println("Layer " + j + " # params: " + mln.getLayer(j).numParams());
        }

        boolean gradOK = GradientCheckUtil.checkGradients(mln, DEFAULT_EPS, DEFAULT_MAX_REL_ERROR,
                DEFAULT_MIN_ABS_ERROR, PRINT_RESULTS, RETURN_ON_FIRST_FAILURE, input, labels);

        String msg = "testEmbeddingLayerSimple";
        assertTrue(msg, gradOK);
    }
 
Example #6
Source File: OutputLayerTest.java    From deeplearning4j with Apache License 2.0
@Test
public void testCnnOutputLayerSoftmax(){
    //Check that softmax is applied channel-wise (along the channel dimension)

    MultiLayerConfiguration conf =
            new NeuralNetConfiguration.Builder().seed(12345L)
                    .updater(new NoOp())
                    .convolutionMode(ConvolutionMode.Same)
                    .list()
                    .layer(new ConvolutionLayer.Builder().nIn(3).nOut(4).activation(Activation.IDENTITY)
                            .dist(new NormalDistribution(0, 1.0))
                            .updater(new NoOp()).build())
                    .layer(new CnnLossLayer.Builder(LossFunction.MSE)
                            .activation(Activation.SOFTMAX)
                            .build())
                    .build();

    MultiLayerNetwork net = new MultiLayerNetwork(conf);
    net.init();

    INDArray in = Nd4j.rand(new int[]{2,3,4,5});
    INDArray out = net.output(in);

    double min = out.minNumber().doubleValue();
    double max = out.maxNumber().doubleValue();

    assertTrue(min >= 0 && max <= 1.0);

    INDArray sum = out.sum(1);
    assertEquals(Nd4j.ones(DataType.FLOAT,2,4,5), sum);
}
 
Example #7
Source File: CuDNNGradientChecks.java    From deeplearning4j with Apache License 2.0
@Test
public void testDenseBatchNorm(){


    MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder()
            .dataType(DataType.DOUBLE)
            .seed(12345)
            .weightInit(WeightInit.XAVIER)
            .updater(new NoOp())
            .list()
            .layer(new DenseLayer.Builder().nIn(5).nOut(5).activation(Activation.TANH).build())
            .layer(new BatchNormalization.Builder().nOut(5).build())
            .layer(new OutputLayer.Builder().nIn(5).nOut(5).activation(Activation.SOFTMAX).lossFunction(LossFunctions.LossFunction.MCXENT).build())
            .build();

    MultiLayerNetwork net = new MultiLayerNetwork(conf);
    net.init();

    INDArray in = Nd4j.rand(3, 5);
    INDArray labels = TestUtils.randomOneHot(3, 5);

    //Mean and variance vars are not gradient checkable; mean/variance "gradient" is used to implement running mean/variance calc
    //i.e., runningMean = decay * runningMean + (1-decay) * batchMean
    //However, numerical gradient will be 0 as forward pass doesn't depend on this "parameter"
    Set<String> excludeParams = new HashSet<>(Arrays.asList("1_mean", "1_var", "1_log10stdev"));
    boolean gradOK = GradientCheckUtil.checkGradients(net, DEFAULT_EPS, DEFAULT_MAX_REL_ERROR,
            DEFAULT_MIN_ABS_ERROR, PRINT_RESULTS, RETURN_ON_FIRST_FAILURE, in, labels, null, null, false, -1, excludeParams, null);

    assertTrue(gradOK);

    TestUtils.testModelSerialization(net);
}
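The excluded parameters above correspond to the running statistics described in the comment: they are maintained with an exponential moving average rather than by gradient descent, so their numerical gradient is zero and they must be skipped during gradient checking. The snippet below is a plain-Java illustration of that update rule only, not DL4J internals; the decay value and batch means are arbitrary.

public class RunningMeanSketch {
    public static void main(String[] args) {
        double decay = 0.9;                    // assumed decay factor, for illustration only
        double runningMean = 0.0;
        double[] batchMeans = {0.2, 0.5, 0.1}; // per-minibatch means
        for (double batchMean : batchMeans) {
            // runningMean = decay * runningMean + (1 - decay) * batchMean
            runningMean = decay * runningMean + (1 - decay) * batchMean;
        }
        // The training-time forward pass does not depend on runningMean,
        // which is why its "gradient" is excluded from the check above.
        System.out.println("running mean after three batches: " + runningMean);
    }
}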
 
Example #8
Source File: BNGradientCheckTest.java    From deeplearning4j with Apache License 2.0
@Test
    public void testGradient2dFixedGammaBeta() {
        DataNormalization scaler = new NormalizerMinMaxScaler();
        DataSetIterator iter = new IrisDataSetIterator(150, 150);
        scaler.fit(iter);
        iter.setPreProcessor(scaler);
        DataSet ds = iter.next();
        INDArray input = ds.getFeatures();
        INDArray labels = ds.getLabels();

        for (boolean useLogStd : new boolean[]{true, false}) {
            MultiLayerConfiguration.Builder builder = new NeuralNetConfiguration.Builder().updater(new NoOp())
                    .dataType(DataType.DOUBLE)
                    .seed(12345L)
                    .dist(new NormalDistribution(0, 1)).list()
                    .layer(0, new DenseLayer.Builder().nIn(4).nOut(3).activation(Activation.IDENTITY).build())
                    .layer(1, new BatchNormalization.Builder().useLogStd(useLogStd).lockGammaBeta(true).gamma(2.0).beta(0.5).nOut(3)
                            .build())
                    .layer(2, new ActivationLayer.Builder().activation(Activation.TANH).build())
                    .layer(3, new OutputLayer.Builder(LossFunctions.LossFunction.MCXENT)
                            .activation(Activation.SOFTMAX).nIn(3).nOut(3).build());

            MultiLayerNetwork mln = new MultiLayerNetwork(builder.build());
            mln.init();

//            for (int j = 0; j < mln.getnLayers(); j++)
//                System.out.println("Layer " + j + " # params: " + mln.getLayer(j).numParams());

            //Mean and variance vars are not gradient checkable; mean/variance "gradient" is used to implement running mean/variance calc
            //i.e., runningMean = decay * runningMean + (1-decay) * batchMean
            //However, numerical gradient will be 0 as forward pass doesn't depend on this "parameter"
            Set<String> excludeParams = new HashSet<>(Arrays.asList("1_mean", "1_var", "1_log10stdev"));
            boolean gradOK = GradientCheckUtil.checkGradients(new GradientCheckUtil.MLNConfig().net(mln).input(input)
                    .labels(labels).excludeParams(excludeParams));

            assertTrue(gradOK);
            TestUtils.testModelSerialization(mln);
        }
    }
 
Example #9
Source File: CNNGradientCheckTest.java    From deeplearning4j with Apache License 2.0
@Test
public void testCnnDilated() {
    int nOut = 2;
    int minibatchSize = 2;
    int width = 8;
    int height = 8;
    int inputDepth = 2;

    Nd4j.getRandom().setSeed(12345);

    boolean[] sub = new boolean[]{true, true, false, true, false};
    int[] stride = new int[]{1, 1, 1, 2, 2};
    int[] kernel = new int[]{2, 3, 3, 3, 3};
    int[] ds = new int[]{2, 2, 3, 3, 2};
    ConvolutionMode[] cms = new ConvolutionMode[]{Same, Truncate, Truncate, Same, Truncate};

    boolean nchw = format == CNN2DFormat.NCHW;

    for (int t = 0; t < sub.length; t++) {
        boolean subsampling = sub[t];
        int s = stride[t];
        int k = kernel[t];
        int d = ds[t];
        ConvolutionMode cm = cms[t];

        //Use larger input with larger dilation values (to avoid invalid config)
        int w = d * width;
        int h = d * height;

        long[] inShape = nchw ? new long[]{minibatchSize, inputDepth, h, w} : new long[]{minibatchSize, h, w, inputDepth};
        INDArray input = Nd4j.rand(DataType.DOUBLE, inShape);
        INDArray labels = Nd4j.zeros(minibatchSize, nOut);
        for (int i = 0; i < minibatchSize; i++) {
            labels.putScalar(new int[]{i, i % nOut}, 1.0);
        }

        NeuralNetConfiguration.ListBuilder b = new NeuralNetConfiguration.Builder().seed(12345)
                .dataType(DataType.DOUBLE)
                .updater(new NoOp())
                .activation(Activation.TANH).convolutionMode(cm).list()
                .layer(new ConvolutionLayer.Builder().name("layer 0")
                        .kernelSize(k, k)
                        .stride(s, s)
                        .dilation(d, d)
                        .nIn(inputDepth).nOut(2).build());
        if (subsampling) {
            b.layer(new SubsamplingLayer.Builder()
                    .poolingType(SubsamplingLayer.PoolingType.MAX)
                    .kernelSize(k, k)
                    .stride(s, s)
                    .dilation(d, d)
                    .build());
        } else {
            b.layer(new ConvolutionLayer.Builder().nIn(2).nOut(2)
                    .kernelSize(k, k)
                    .stride(s, s)
                    .dilation(d, d)
                    .build());
        }

        MultiLayerConfiguration conf = b.layer(new OutputLayer.Builder(LossFunctions.LossFunction.MCXENT)
                .activation(Activation.SOFTMAX).nOut(nOut).build())
                .setInputType(InputType.convolutional(h, w, inputDepth, format)).build();

        MultiLayerNetwork net = new MultiLayerNetwork(conf);
        net.init();

        for (int i = 0; i < net.getLayers().length; i++) {
            System.out.println("nParams, layer " + i + ": " + net.getLayer(i).numParams());
        }

        String msg = (subsampling ? "subsampling" : "conv") + " - mb=" + minibatchSize + ", k="
                + k + ", s=" + s + ", d=" + d + ", cm=" + cm;
        System.out.println(msg);

        boolean gradOK = GradientCheckUtil.checkGradients(net, DEFAULT_EPS, DEFAULT_MAX_REL_ERROR,
                DEFAULT_MIN_ABS_ERROR, PRINT_RESULTS, RETURN_ON_FIRST_FAILURE, input, labels);

        assertTrue(msg, gradOK);

        TestUtils.testModelSerialization(net);
    }
}
 
Example #10
Source File: CNNGradientCheckTest.java    From deeplearning4j with Apache License 2.0
@Test
public void testDeconvolution2D() {
    int nOut = 2;

    int[] minibatchSizes = new int[]{1, 3, 3, 1, 3};
    int[] kernelSizes = new int[]{1, 1, 1, 3, 3};
    int[] strides = {1, 1, 2, 2, 2};
    int[] dilation = {1, 2, 1, 2, 2};
    Activation[] activations = new Activation[]{Activation.SIGMOID, Activation.TANH, Activation.SIGMOID, Activation.SIGMOID, Activation.SIGMOID};
    ConvolutionMode[] cModes = new ConvolutionMode[]{Same, Same, Truncate, Truncate, Truncate};
    int width = 7;
    int height = 7;
    int inputDepth = 3;

    Nd4j.getRandom().setSeed(12345);

    boolean nchw = format == CNN2DFormat.NCHW;

    for (int i = 0; i < minibatchSizes.length; i++) {
        int minibatchSize = minibatchSizes[i];
        int k = kernelSizes[i];
        int s = strides[i];
        int d = dilation[i];
        ConvolutionMode cm = cModes[i];
        Activation act = activations[i];


        int w = d * width;
        int h = d * height;

        long[] inShape = nchw ? new long[]{minibatchSize, inputDepth, h, w} : new long[]{minibatchSize, h, w, inputDepth};
        INDArray input = Nd4j.rand(DataType.DOUBLE, inShape);


        INDArray labels = Nd4j.zeros(minibatchSize, nOut);
        for (int j = 0; j < minibatchSize; j++) {
            labels.putScalar(new int[]{j, j % nOut}, 1.0);
        }

        NeuralNetConfiguration.ListBuilder b = new NeuralNetConfiguration.Builder().seed(12345)
                .dataType(DataType.DOUBLE)
                .updater(new NoOp())
                .activation(act)
                .list()
                .layer(new Deconvolution2D.Builder().name("deconvolution_2D_layer")
                        .kernelSize(k, k)
                        .stride(s, s)
                        .dilation(d, d)
                        .convolutionMode(cm)
                        .nIn(inputDepth).nOut(nOut).build());

        MultiLayerConfiguration conf = b.layer(new OutputLayer.Builder(LossFunctions.LossFunction.MCXENT)
                .activation(Activation.SOFTMAX).nOut(nOut).build())
                .setInputType(InputType.convolutional(h, w, inputDepth, format)).build();

        MultiLayerNetwork net = new MultiLayerNetwork(conf);
        net.init();

        for (int j = 0; j < net.getLayers().length; j++) {
            System.out.println("nParams, layer " + j + ": " + net.getLayer(j).numParams());
        }

        String msg = " - mb=" + minibatchSize + ", k="
                + k + ", s=" + s + ", d=" + d + ", cm=" + cm;
        System.out.println(msg);

        boolean gradOK = GradientCheckUtil.checkGradients(new GradientCheckUtil.MLNConfig().net(net).input(input)
                .labels(labels).subset(true).maxPerParam(100));

        assertTrue(msg, gradOK);

        TestUtils.testModelSerialization(net);
    }
}
 
Example #11
Source File: DTypeTests.java    From deeplearning4j with Apache License 2.0
@Test
public void testAttentionDTypes() {
    for (DataType globalDtype : new DataType[]{DataType.DOUBLE, DataType.FLOAT, DataType.HALF}) {
        Nd4j.setDefaultDataTypes(globalDtype, globalDtype);
        for (DataType networkDtype : new DataType[]{DataType.DOUBLE, DataType.FLOAT, DataType.HALF}) {
            assertEquals(globalDtype, Nd4j.dataType());
            assertEquals(globalDtype, Nd4j.defaultFloatingPointType());

            String msg = "Global dtype: " + globalDtype + ", network dtype: " + networkDtype;

            int mb = 3;
            int nIn = 3;
            int nOut = 5;
            int tsLength = 4;
            int layerSize = 8;
            int numQueries = 6;

            INDArray in = Nd4j.rand(networkDtype, new long[]{mb, nIn, tsLength});
            INDArray labels = TestUtils.randomOneHot(mb, nOut);

            MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder()
                    .dataType(networkDtype)
                    .activation(Activation.TANH)
                    .updater(new NoOp())
                    .weightInit(WeightInit.XAVIER)
                    .list()
                    .layer(new LSTM.Builder().nOut(layerSize).build())
                    .layer(new SelfAttentionLayer.Builder().nOut(8).nHeads(2).projectInput(true).build())
                    .layer(new LearnedSelfAttentionLayer.Builder().nOut(8).nHeads(2).nQueries(numQueries).projectInput(true).build())
                    .layer(new RecurrentAttentionLayer.Builder().nIn(layerSize).nOut(layerSize).nHeads(1).projectInput(false).hasBias(false).build())
                    .layer(new GlobalPoolingLayer.Builder().poolingType(PoolingType.MAX).build())
                    .layer(new OutputLayer.Builder().nOut(nOut).activation(Activation.SOFTMAX)
                            .lossFunction(LossFunctions.LossFunction.MCXENT).build())
                    .setInputType(InputType.recurrent(nIn))
                    .build();

            MultiLayerNetwork net = new MultiLayerNetwork(conf);
            net.init();

            INDArray out = net.output(in);
            assertEquals(msg, networkDtype, out.dataType());
            List<INDArray> ff = net.feedForward(in);
            for (int i = 0; i < ff.size(); i++) {
                String s = msg + " - layer " + (i - 1) + " - " + (i == 0 ? "input" : net.getLayer(i - 1).conf().getLayer().getClass().getSimpleName());
                assertEquals(s, networkDtype, ff.get(i).dataType());
            }

            net.setInput(in);
            net.setLabels(labels);
            net.computeGradientAndScore();

            net.fit(new DataSet(in, labels));

            logUsedClasses(net);

            //Now, test mismatched dtypes for input/labels:
            for (DataType inputLabelDtype : new DataType[]{DataType.DOUBLE, DataType.FLOAT, DataType.HALF}) {
                INDArray in2 = in.castTo(inputLabelDtype);
                INDArray label2 = labels.castTo(inputLabelDtype);
                net.output(in2);
                net.setInput(in2);
                net.setLabels(label2);
                net.computeGradientAndScore();

                net.fit(new DataSet(in2, label2));
            }
        }
    }
}
 
Example #12
Source File: CNNGradientCheckTest.java    From deeplearning4j with Apache License 2.0
@Test
public void testCnnSamePaddingMode() {
    int nOut = 2;

    int[] minibatchSizes = {1, 3, 3, 2, 1, 2};
    int[] heights = new int[]{4, 5, 6, 5, 4, 4}; //Same padding mode: insensitive to exact input size...
    int[] kernelSizes = new int[]{2, 3, 2, 3, 2, 3};
    int[] inputDepths = {1, 2, 4, 3, 2, 3};

    int width = 5;

    Nd4j.getRandom().setSeed(12345);

    boolean nchw = format == CNN2DFormat.NCHW;

    for( int i=0; i<minibatchSizes.length; i++ ){
        int inputDepth = inputDepths[i];
        int minibatchSize = minibatchSizes[i];
        int height = heights[i];
        int k = kernelSizes[i];

        long[] inShape = nchw ? new long[]{minibatchSize, inputDepth, height, width} : new long[]{minibatchSize, height, width, inputDepth};
        INDArray input = Nd4j.rand(DataType.DOUBLE, inShape);

        INDArray labels = TestUtils.randomOneHot(minibatchSize, nOut);

        MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder().seed(12345)
                .dataType(DataType.DOUBLE)
                .updater(new NoOp())
                .activation(Activation.TANH).convolutionMode(Same).list()
                .layer(0, new ConvolutionLayer.Builder().name("layer 0").kernelSize(k, k)
                        .stride(1, 1).padding(0, 0).nIn(inputDepth).nOut(2).build())
                .layer(1, new SubsamplingLayer.Builder()
                        .poolingType(SubsamplingLayer.PoolingType.MAX).kernelSize(k, k)
                        .stride(1, 1).padding(0, 0).build())
                .layer(2, new ConvolutionLayer.Builder().nIn(2).nOut(2).kernelSize(k, k)
                        .stride(1, 1).padding(0, 0).build())
                .layer(3, new OutputLayer.Builder(LossFunctions.LossFunction.MCXENT)
                        .activation(Activation.SOFTMAX).nOut(nOut).build())
                .setInputType(InputType.convolutional(height, width, inputDepth, format)).build();

        MultiLayerNetwork net = new MultiLayerNetwork(conf);
        net.init();

        for (int j = 0; j < net.getLayers().length; j++) {
            System.out.println("nParams, layer " + j + ": " + net.getLayer(j).numParams());
        }

        String msg = "Minibatch=" + minibatchSize + ", inDepth=" + inputDepth + ", height=" + height
                + ", kernelSize=" + k;
        System.out.println(msg);

        boolean gradOK = GradientCheckUtil.checkGradients(net, DEFAULT_EPS, DEFAULT_MAX_REL_ERROR,
                DEFAULT_MIN_ABS_ERROR, PRINT_RESULTS, RETURN_ON_FIRST_FAILURE, input, labels);

        assertTrue(msg, gradOK);

        TestUtils.testModelSerialization(net);
    }
}
 
Example #13
Source File: GradientCheckTestsComputationGraph.java    From deeplearning4j with Apache License 2.0
@Test
    public void testLSTMWithReverseTimeSeriesVertex() {

        Nd4j.getRandom().setSeed(12345);
        ComputationGraphConfiguration conf =
                new NeuralNetConfiguration.Builder().seed(12345)
                        .dataType(DataType.DOUBLE)
                        .optimizationAlgo(OptimizationAlgorithm.STOCHASTIC_GRADIENT_DESCENT)
                        .dist(new NormalDistribution(0, 1))
                        .updater(new NoOp()).graphBuilder()
                        .addInputs("input").setOutputs("out")
                        .addLayer("lstm_a",
                                new LSTM.Builder().nIn(2).nOut(3)
                                        .activation(Activation.TANH).build(),
                                "input")
                        .addVertex("input_rev", new ReverseTimeSeriesVertex("input"), "input")
                        .addLayer("lstm_b",
                                new LSTM.Builder().nIn(2).nOut(3)
                                        .activation(Activation.TANH).build(),
                                "input_rev")
                        .addVertex("lstm_b_rev", new ReverseTimeSeriesVertex("input"), "lstm_b")
                        .addLayer("out", new RnnOutputLayer.Builder().nIn(3 + 3).nOut(2)
                                        .activation(Activation.SOFTMAX)
                                        .lossFunction(LossFunctions.LossFunction.MCXENT).build(),
                                "lstm_a", "lstm_b_rev")
                        .build();

        ComputationGraph graph = new ComputationGraph(conf);
        graph.init();

        Random r = new Random(12345);
        INDArray input  = Nd4j.rand(new int[] {2, 2, 4});
        INDArray labels = TestUtils.randomOneHotTimeSeries(2, 2, 4);

        if (PRINT_RESULTS) {
            System.out.println("testLSTMWithReverseTimeSeriesVertex()");
//            for (int j = 0; j < graph.getNumLayers(); j++)
//                System.out.println("Layer " + j + " # params: " + graph.getLayer(j).numParams());
        }

        boolean gradOK = GradientCheckUtil.checkGradients(new GradientCheckUtil.GraphConfig().net(graph).inputs(new INDArray[]{input})
                .labels(new INDArray[]{labels}));

        String msg = "testLSTMWithDuplicateToTimeSeries()";
        assertTrue(msg, gradOK);

        //Second: test with input mask arrays.
        INDArray inMask = Nd4j.zeros(3, 5);
        inMask.putRow(0, Nd4j.create(new double[] {1, 1, 1, 0, 0}));
        inMask.putRow(1, Nd4j.create(new double[] {1, 1, 0, 1, 0}));
        inMask.putRow(2, Nd4j.create(new double[] {1, 1, 1, 1, 1}));
        graph.setLayerMaskArrays(new INDArray[] {inMask}, null);
        gradOK = GradientCheckUtil.checkGradients(new GradientCheckUtil.GraphConfig().net(graph).inputs(new INDArray[]{input})
                .labels(new INDArray[]{labels}));

        assertTrue(msg, gradOK);
        TestUtils.testModelSerialization(graph);
    }
 
Example #14
Source File: CNN3DGradientCheckTest.java    From deeplearning4j with Apache License 2.0
@Test
public void testDeconv3d() {
    Nd4j.getRandom().setSeed(12345);
    // Note: we checked this with a variety of parameters, but it takes a lot of time.
    int[] depths = {8, 8, 9};
    int[] heights = {8, 9, 9};
    int[] widths = {8, 8, 9};


    int[][] kernels = {{2, 2, 2}, {3, 3, 3}, {2, 3, 2}};
    int[][] strides = {{1, 1, 1}, {1, 1, 1}, {2, 2, 2}};

    Activation[] activations = {Activation.SIGMOID, Activation.TANH, Activation.IDENTITY};

    ConvolutionMode[] modes = {ConvolutionMode.Truncate, ConvolutionMode.Same, ConvolutionMode.Same};
    int[] mbs = {1, 3, 2};
    Convolution3D.DataFormat[] dataFormats = new Convolution3D.DataFormat[]{Convolution3D.DataFormat.NCDHW, Convolution3D.DataFormat.NDHWC, Convolution3D.DataFormat.NCDHW};

    int convNIn = 2;
    int finalNOut = 2;
    int[] deconvOut = {2, 3, 4};

    for (int i = 0; i < activations.length; i++) {
        Activation afn = activations[i];
        int miniBatchSize = mbs[i];
        int depth = depths[i];
        int height = heights[i];
        int width = widths[i];
        ConvolutionMode mode = modes[i];
        int[] kernel = kernels[i];
        int[] stride = strides[i];
        Convolution3D.DataFormat df = dataFormats[i];
        int dOut = deconvOut[i];

        INDArray input;
        if (df == Convolution3D.DataFormat.NDHWC) {
            input = Nd4j.rand(new int[]{miniBatchSize, depth, height, width, convNIn});
        } else {
            input = Nd4j.rand(new int[]{miniBatchSize, convNIn, depth, height, width});
        }
        INDArray labels = Nd4j.zeros(miniBatchSize, finalNOut);
        for (int j = 0; j < miniBatchSize; j++) {
            labels.putScalar(new int[]{j, j % finalNOut}, 1.0);
        }

        MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder()
                .dataType(DataType.DOUBLE)
                .updater(new NoOp())
                .weightInit(new NormalDistribution(0, 0.1))
                .list()
                .layer(0, new Convolution3D.Builder().activation(afn).kernelSize(kernel)
                        .stride(stride).nIn(convNIn).nOut(dOut).hasBias(false)
                        .convolutionMode(mode).dataFormat(df)
                        .build())
                .layer(1, new Deconvolution3D.Builder().activation(afn).kernelSize(kernel)
                        .stride(stride).nOut(dOut).hasBias(false)
                        .convolutionMode(mode).dataFormat(df)
                        .build())
                .layer(new OutputLayer.Builder(LossFunctions.LossFunction.MCXENT)
                        .activation(Activation.SOFTMAX).nOut(finalNOut).build())
                .setInputType(InputType.convolutional3D(df, depth, height, width, convNIn)).build();

        String json = conf.toJson();
        MultiLayerConfiguration c2 = MultiLayerConfiguration.fromJson(json);
        assertEquals(conf, c2);

        MultiLayerNetwork net = new MultiLayerNetwork(conf);
        net.init();

        String msg = "DataFormat = " + df + ", minibatch size = " + miniBatchSize + ", activationFn=" + afn
                + ", kernel = " + Arrays.toString(kernel) + ", stride = "
                + Arrays.toString(stride) + ", mode = " + mode.toString()
                + ", input depth " + depth + ", input height " + height
                + ", input width " + width;

        if (PRINT_RESULTS) {
            log.info(msg);
        }

        boolean gradOK = GradientCheckUtil.checkGradients(new GradientCheckUtil.MLNConfig().net(net).input(input)
                .labels(labels).subset(true).maxPerParam(64));

        assertTrue(msg, gradOK);

        TestUtils.testModelSerialization(net);
    }
}
 
Example #15
Source File: GradientCheckTestsComputationGraph.java    From deeplearning4j with Apache License 2.0
@Test
    public void testBasicIrisWithMerging() {
        Nd4j.getRandom().setSeed(12345);
        ComputationGraphConfiguration conf = new NeuralNetConfiguration.Builder().seed(12345)
                        .dataType(DataType.DOUBLE)
                        .optimizationAlgo(OptimizationAlgorithm.STOCHASTIC_GRADIENT_DESCENT)
                        .dist(new NormalDistribution(0, 1)).updater(new NoOp())
                        .graphBuilder().addInputs("input")
                        .addLayer("l1", new DenseLayer.Builder().nIn(4).nOut(5).activation(Activation.TANH).build(),
                                        "input")
                        .addLayer("l2", new DenseLayer.Builder().nIn(4).nOut(5).activation(Activation.TANH).build(),
                                        "input")
                        .addVertex("merge", new MergeVertex(), "l1", "l2")
                        .addLayer("outputLayer",
                                        new OutputLayer.Builder().lossFunction(LossFunctions.LossFunction.MCXENT)
                                                        .activation(Activation.SOFTMAX).nIn(5 + 5).nOut(3).build(),
                                        "merge")
                        .setOutputs("outputLayer").build();

        ComputationGraph graph = new ComputationGraph(conf);
        graph.init();

        int numParams = (4 * 5 + 5) + (4 * 5 + 5) + (10 * 3 + 3);
        assertEquals(numParams, graph.numParams());

        Nd4j.getRandom().setSeed(12345);
        long nParams = graph.numParams();
        INDArray newParams = Nd4j.rand(new long[]{1, nParams});
        graph.setParams(newParams);

        DataSet ds = new IrisDataSetIterator(150, 150).next();
        INDArray min = ds.getFeatures().min(0);
        INDArray max = ds.getFeatures().max(0);
        ds.getFeatures().subiRowVector(min).diviRowVector(max.sub(min));
        INDArray input = ds.getFeatures();
        INDArray labels = ds.getLabels();

        if (PRINT_RESULTS) {
            System.out.println("testBasicIrisWithMerging()");
//            for (int j = 0; j < graph.getNumLayers(); j++)
//                System.out.println("Layer " + j + " # params: " + graph.getLayer(j).numParams());
        }

        boolean gradOK = GradientCheckUtil.checkGradients(new GradientCheckUtil.GraphConfig().net(graph).inputs(new INDArray[]{input})
                .labels(new INDArray[]{labels}));

        String msg = "testBasicIrisWithMerging()";
        assertTrue(msg, gradOK);
        TestUtils.testModelSerialization(graph);
    }
 
Example #16
Source File: OutputLayerTest.java    From deeplearning4j with Apache License 2.0
@Test
public void testOutputLayersRnnForwardPass() {
    //Test output layer with RNNs
    //Expect all outputs etc. to be 2d
    int nIn = 2;
    int nOut = 5;
    int layerSize = 4;
    int timeSeriesLength = 6;
    int miniBatchSize = 3;

    Random r = new Random(12345L);
    INDArray input = Nd4j.zeros(miniBatchSize, nIn, timeSeriesLength);
    for (int i = 0; i < miniBatchSize; i++) {
        for (int j = 0; j < nIn; j++) {
            for (int k = 0; k < timeSeriesLength; k++) {
                input.putScalar(new int[] {i, j, k}, r.nextDouble() - 0.5);
            }
        }
    }

    MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder().seed(12345L).list()
                    .layer(0, new GravesLSTM.Builder().nIn(nIn).nOut(layerSize)
                                    .dist(new NormalDistribution(0, 1)).activation(Activation.TANH)
                                    .updater(new NoOp()).build())
                    .layer(1, new org.deeplearning4j.nn.conf.layers.OutputLayer.Builder(LossFunction.MCXENT)
                                    .activation(Activation.SOFTMAX).nIn(layerSize).nOut(nOut)
                                    .dist(new NormalDistribution(0, 1))
                                    .updater(new NoOp()).build())
                    .inputPreProcessor(1, new RnnToFeedForwardPreProcessor()).build();

    MultiLayerNetwork mln = new MultiLayerNetwork(conf);
    mln.init();

    INDArray out2d = mln.feedForward(input).get(2);
    assertArrayEquals(out2d.shape(), new long[] {miniBatchSize * timeSeriesLength, nOut});

    INDArray out = mln.output(input);
    assertArrayEquals(out.shape(), new long[] {miniBatchSize * timeSeriesLength, nOut});

    INDArray preout = mln.output(input);
    assertArrayEquals(preout.shape(), new long[] {miniBatchSize * timeSeriesLength, nOut});

    //As above, but for RnnOutputLayer. Expect all activations etc. to be 3d

    MultiLayerConfiguration confRnn = new NeuralNetConfiguration.Builder().seed(12345L).list()
                    .layer(0, new GravesLSTM.Builder().nIn(nIn).nOut(layerSize)
                                    .dist(new NormalDistribution(0, 1)).activation(Activation.TANH)
                                    .updater(new NoOp()).build())
                    .layer(1, new org.deeplearning4j.nn.conf.layers.RnnOutputLayer.Builder(LossFunction.MCXENT)
                                    .activation(Activation.SOFTMAX).nIn(layerSize).nOut(nOut)
                                    .dist(new NormalDistribution(0, 1))
                                    .updater(new NoOp()).build())
                    .build();

    MultiLayerNetwork mlnRnn = new MultiLayerNetwork(confRnn);
    mlnRnn.init();

    INDArray out3d = mlnRnn.feedForward(input).get(2);
    assertArrayEquals(out3d.shape(), new long[] {miniBatchSize, nOut, timeSeriesLength});

    INDArray outRnn = mlnRnn.output(input);
    assertArrayEquals(outRnn.shape(), new long[] {miniBatchSize, nOut, timeSeriesLength});

    INDArray preoutRnn = mlnRnn.output(input);
    assertArrayEquals(preoutRnn.shape(), new long[] {miniBatchSize, nOut, timeSeriesLength});
}
 
Example #17
Source File: GradientCheckTestsComputationGraph.java    From deeplearning4j with Apache License 2.0
@Test
    public void testBasicIrisWithElementWiseNode() {

        ElementWiseVertex.Op[] ops = new ElementWiseVertex.Op[] {ElementWiseVertex.Op.Add,
                        ElementWiseVertex.Op.Subtract, ElementWiseVertex.Op.Product, ElementWiseVertex.Op.Average, ElementWiseVertex.Op.Max};

        for (ElementWiseVertex.Op op : ops) {

            Nd4j.getRandom().setSeed(12345);
            ComputationGraphConfiguration conf = new NeuralNetConfiguration.Builder().seed(12345)
                            .dataType(DataType.DOUBLE)
                            .optimizationAlgo(OptimizationAlgorithm.STOCHASTIC_GRADIENT_DESCENT)
                            .dist(new NormalDistribution(0, 1))
                            .updater(new NoOp()).graphBuilder().addInputs("input")
                            .addLayer("l1", new DenseLayer.Builder().nIn(4).nOut(5).activation(Activation.TANH).build(),
                                            "input")
                            .addLayer("l2", new DenseLayer.Builder().nIn(4).nOut(5).activation(Activation.SIGMOID)
                                            .build(), "input")
                            .addVertex("elementwise", new ElementWiseVertex(op), "l1", "l2")
                            .addLayer("outputLayer",
                                            new OutputLayer.Builder().lossFunction(LossFunctions.LossFunction.MCXENT)
                                                            .activation(Activation.SOFTMAX).nIn(5).nOut(3).build(),
                                            "elementwise")
                            .setOutputs("outputLayer").build();

            ComputationGraph graph = new ComputationGraph(conf);
            graph.init();

            int numParams = (4 * 5 + 5) + (4 * 5 + 5) + (5 * 3 + 3);
            assertEquals(numParams, graph.numParams());

            Nd4j.getRandom().setSeed(12345);
            long nParams = graph.numParams();
            INDArray newParams = Nd4j.rand(new long[]{1, nParams});
            graph.setParams(newParams);

            DataSet ds = new IrisDataSetIterator(150, 150).next();
            INDArray min = ds.getFeatures().min(0);
            INDArray max = ds.getFeatures().max(0);
            ds.getFeatures().subiRowVector(min).diviRowVector(max.sub(min));
            INDArray input = ds.getFeatures();
            INDArray labels = ds.getLabels();

            if (PRINT_RESULTS) {
                System.out.println("testBasicIrisWithElementWiseVertex(op=" + op + ")");
//                for (int j = 0; j < graph.getNumLayers(); j++)
//                    System.out.println("Layer " + j + " # params: " + graph.getLayer(j).numParams());
            }

            boolean gradOK = GradientCheckUtil.checkGradients(new GradientCheckUtil.GraphConfig().net(graph).inputs(new INDArray[]{input})
                    .labels(new INDArray[]{labels}));

            String msg = "testBasicIrisWithElementWiseVertex(op=" + op + ")";
            assertTrue(msg, gradOK);
            TestUtils.testModelSerialization(graph);
        }
    }
 
Example #18
Source File: CNN3DGradientCheckTest.java    From deeplearning4j with Apache License 2.0
@Test
    public void testCnn3DCropping() {
        Nd4j.getRandom().setSeed(42);

        int depth = 6;
        int height = 6;
        int width = 6;


        int[] minibatchSizes = {3};
        int convNIn = 2;
        int convNOut1 = 3;
        int convNOut2 = 4;
        int denseNOut = 5;
        int finalNOut = 8;


        int[] kernel = {1, 1, 1};
        int[] cropping = {0, 0, 1, 1, 2, 2};

        Activation[] activations = {Activation.SIGMOID};

        ConvolutionMode[] modes = {ConvolutionMode.Same};

        for (Activation afn : activations) {
            for (int miniBatchSize : minibatchSizes) {
                for (ConvolutionMode mode : modes) {

                    int outDepth = mode == ConvolutionMode.Same ?
                            depth : (depth - kernel[0]) + 1;
                    int outHeight = mode == ConvolutionMode.Same ?
                            height : (height - kernel[1]) + 1;
                    int outWidth = mode == ConvolutionMode.Same ?
                            width : (width - kernel[2]) + 1;

                    outDepth -= cropping[0] + cropping[1];
                    outHeight -= cropping[2] + cropping[3];
                    outWidth -= cropping[4] + cropping[5];

                    INDArray input = Nd4j.rand(new int[]{miniBatchSize, convNIn, depth, height, width});
                    INDArray labels = Nd4j.zeros(miniBatchSize, finalNOut);
                    for (int i = 0; i < miniBatchSize; i++) {
                        labels.putScalar(new int[]{i, i % finalNOut}, 1.0);
                    }

                    MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder()
                            .dataType(DataType.DOUBLE)
                            .updater(new NoOp()).weightInit(WeightInit.LECUN_NORMAL)
                            .dist(new NormalDistribution(0, 1))
                            .list()
                            .layer(0, new Convolution3D.Builder().activation(afn).kernelSize(kernel)
                                    .nIn(convNIn).nOut(convNOut1).hasBias(false)
                                    .convolutionMode(mode).dataFormat(Convolution3D.DataFormat.NCDHW)
                                    .build())
                            .layer(1, new Convolution3D.Builder().activation(afn).kernelSize(1, 1, 1)
                                    .nIn(convNOut1).nOut(convNOut2).hasBias(false)
                                    .convolutionMode(mode).dataFormat(Convolution3D.DataFormat.NCDHW)
                                    .build())
                            .layer(2, new Cropping3D.Builder(cropping).build())
                            .layer(3, new DenseLayer.Builder().nOut(denseNOut).build())
                            .layer(new OutputLayer.Builder(LossFunctions.LossFunction.MCXENT)
                                    .activation(Activation.SOFTMAX).nOut(finalNOut).build())
                            .inputPreProcessor(3,
                                    new Cnn3DToFeedForwardPreProcessor(outDepth, outHeight, outWidth,
                                            convNOut2, true))
                            .setInputType(InputType.convolutional3D(depth, height, width, convNIn)).build();

                    String json = conf.toJson();
                    MultiLayerConfiguration c2 = MultiLayerConfiguration.fromJson(json);
                    assertEquals(conf, c2);

                    MultiLayerNetwork net = new MultiLayerNetwork(conf);
                    net.init();

                    String msg = "Minibatch size = " + miniBatchSize + ", activationFn=" + afn
                            + ", kernel = " + Arrays.toString(kernel) + ", mode = " + mode.toString()
                            + ", input depth " + depth + ", input height " + height
                            + ", input width " + width;

                    if (PRINT_RESULTS) {
                        log.info(msg);
//                        for (int j = 0; j < net.getnLayers(); j++) {
//                            log.info("Layer " + j + " # params: " + net.getLayer(j).numParams());
//                        }
                    }

                    boolean gradOK = GradientCheckUtil.checkGradients(net, DEFAULT_EPS,
                            DEFAULT_MAX_REL_ERROR, DEFAULT_MIN_ABS_ERROR, PRINT_RESULTS,
                            RETURN_ON_FIRST_FAILURE, input, labels);

                    assertTrue(msg, gradOK);

                    TestUtils.testModelSerialization(net);
                }

            }
        }
    }
 
Example #19
Source File: CNNGradientCheckTest.java    From deeplearning4j with Apache License 2.0
@Test
    public void testCnnZeroPaddingLayer() {
        Nd4j.getRandom().setSeed(12345);
        int nOut = 4;


        int width = 6;
        int height = 6;


        int[] kernel = {2, 2};
        int[] stride = {1, 1};
        int[] padding = {0, 0};

        int[] minibatchSizes = {1, 3, 2};
        int[] inputDepths = {1, 3, 2};
        int[][] zeroPadLayer = new int[][]{{0, 0, 0, 0}, {1, 1, 0, 0}, {2, 2, 2, 2}};

        boolean nchw = format == CNN2DFormat.NCHW;

        for( int i=0; i<minibatchSizes.length; i++ ){
            int minibatchSize = minibatchSizes[i];
            int inputDepth = inputDepths[i];
            int[] zeroPad = zeroPadLayer[i];

            long[] inShape = nchw ? new long[]{minibatchSize, inputDepth, height, width} : new long[]{minibatchSize, height, width, inputDepth};
            INDArray input = Nd4j.rand(DataType.DOUBLE, inShape);
            INDArray labels = TestUtils.randomOneHot(minibatchSize, nOut);

            MultiLayerConfiguration conf =
                    new NeuralNetConfiguration.Builder().updater(new NoOp())
                            .dataType(DataType.DOUBLE)
                            .dist(new NormalDistribution(0, 1)).list()
                            .layer(0, new ConvolutionLayer.Builder(kernel, stride, padding)
                                    .nIn(inputDepth).nOut(3).build())//output: (6-2+0)/1+1 = 5
                            .layer(1, new ZeroPaddingLayer.Builder(zeroPad).build()).layer(2,
                            new ConvolutionLayer.Builder(kernel, stride,
                                    padding).nIn(3).nOut(3).build())//output: (6-2+0)/1+1 = 5
                            .layer(3, new OutputLayer.Builder(LossFunctions.LossFunction.MCXENT)
                                    .activation(Activation.SOFTMAX).nOut(4).build())
                            .setInputType(InputType.convolutional(height, width, inputDepth, format))
                            .build();

            MultiLayerNetwork net = new MultiLayerNetwork(conf);
            net.init();

            //Check zero padding activation shape
            org.deeplearning4j.nn.layers.convolution.ZeroPaddingLayer zpl =
                    (org.deeplearning4j.nn.layers.convolution.ZeroPaddingLayer) net.getLayer(1);
            long[] expShape;
            if(nchw){
                expShape = new long[]{minibatchSize, inputDepth, height + zeroPad[0] + zeroPad[1],
                        width + zeroPad[2] + zeroPad[3]};
            } else {
                expShape = new long[]{minibatchSize, height + zeroPad[0] + zeroPad[1],
                        width + zeroPad[2] + zeroPad[3], inputDepth};
            }
            INDArray out = zpl.activate(input, false, LayerWorkspaceMgr.noWorkspaces());
            assertArrayEquals(expShape, out.shape());

            String msg = "minibatch=" + minibatchSize + ", channels=" + inputDepth + ", zeroPad = "
                    + Arrays.toString(zeroPad);

            if (PRINT_RESULTS) {
                System.out.println(msg);
//                for (int j = 0; j < net.getnLayers(); j++)
//                    System.out.println("Layer " + j + " # params: " + net.getLayer(j).numParams());
            }

            boolean gradOK = GradientCheckUtil.checkGradients(net, DEFAULT_EPS, DEFAULT_MAX_REL_ERROR,
                    DEFAULT_MIN_ABS_ERROR, PRINT_RESULTS, RETURN_ON_FIRST_FAILURE, input, labels);

            assertTrue(msg, gradOK);

            TestUtils.testModelSerialization(net);
        }
    }
 
Example #20
Source File: CuDNNGradientChecks.java    From deeplearning4j with Apache License 2.0
@Test
public void testLRN() throws Exception {

    Nd4j.getRandom().setSeed(12345);
    int minibatch = 10;
    int depth = 6;
    int hw = 5;
    int nOut = 4;
    INDArray input = Nd4j.rand(new int[] {minibatch, depth, hw, hw});
    INDArray labels = Nd4j.zeros(minibatch, nOut);
    Random r = new Random(12345);
    for (int i = 0; i < minibatch; i++) {
        labels.putScalar(i, r.nextInt(nOut), 1.0);
    }

    MultiLayerConfiguration.Builder builder = new NeuralNetConfiguration.Builder().updater(new NoOp())
                    .dataType(DataType.DOUBLE)
                    .seed(12345L)
                    .dist(new NormalDistribution(0, 2)).list()
                    .layer(0, new ConvolutionLayer.Builder().nOut(6).kernelSize(2, 2).stride(1, 1)
                                    .activation(Activation.TANH).build())
                    .layer(1, new LocalResponseNormalization.Builder().build())
                    .layer(2, new OutputLayer.Builder(LossFunctions.LossFunction.MCXENT)
                                    .activation(Activation.SOFTMAX).nOut(nOut).build())
                    .setInputType(InputType.convolutional(hw, hw, depth));

    MultiLayerNetwork mln = new MultiLayerNetwork(builder.build());
    mln.init();

    Field f = org.deeplearning4j.nn.layers.normalization.LocalResponseNormalization.class
                    .getDeclaredField("helper");
    f.setAccessible(true);

    org.deeplearning4j.nn.layers.normalization.LocalResponseNormalization l =
                    (org.deeplearning4j.nn.layers.normalization.LocalResponseNormalization) mln.getLayer(1);
    LocalResponseNormalizationHelper lrn = (LocalResponseNormalizationHelper) f.get(l);
    assertTrue(lrn instanceof CudnnLocalResponseNormalizationHelper);

    //-------------------------------
    //For debugging/comparison to no-cudnn case: set helper field to null
    //        f.set(l, null);
    //        assertNull(f.get(l));
    //-------------------------------

    if (PRINT_RESULTS) {
        for (int j = 0; j < mln.getnLayers(); j++)
            System.out.println("Layer " + j + " # params: " + mln.getLayer(j).numParams());
    }

    boolean gradOK = GradientCheckUtil.checkGradients(mln, DEFAULT_EPS, DEFAULT_MAX_REL_ERROR,
                    DEFAULT_MIN_ABS_ERROR, PRINT_RESULTS, RETURN_ON_FIRST_FAILURE, input, labels);

    assertTrue(gradOK);
}
 
Example #21
Source File: CNNGradientCheckTest.java    From deeplearning4j with Apache License 2.0
@Test
    public void testGradientCNNMLN() {
        if(this.format != CNN2DFormat.NCHW) //Only test NCHW due to flat input format...
            return;

        //Parameterized test, testing combinations of:
        // (a) activation function
        // (b) Whether to test at random initialization, or after some learning (i.e., 'characteristic mode of operation')
        // (c) Loss function (with specified output activations)
        Activation[] activFns = {Activation.SIGMOID, Activation.TANH};
        boolean[] characteristic = {false, true}; //If true: run some backprop steps first

        LossFunctions.LossFunction[] lossFunctions =
                {LossFunctions.LossFunction.NEGATIVELOGLIKELIHOOD, LossFunctions.LossFunction.MSE};
        Activation[] outputActivations = {Activation.SOFTMAX, Activation.TANH}; //i.e., lossFunctions[i] used with outputActivations[i] here

        DataSet ds = new IrisDataSetIterator(150, 150).next();
        ds.normalizeZeroMeanZeroUnitVariance();
        INDArray input = ds.getFeatures();
        INDArray labels = ds.getLabels();

        for (Activation afn : activFns) {
            for (boolean doLearningFirst : characteristic) {
                for (int i = 0; i < lossFunctions.length; i++) {
                    LossFunctions.LossFunction lf = lossFunctions[i];
                    Activation outputActivation = outputActivations[i];

                    MultiLayerConfiguration.Builder builder = new NeuralNetConfiguration.Builder()
                            .dataType(DataType.DOUBLE)
                            .optimizationAlgo(OptimizationAlgorithm.CONJUGATE_GRADIENT).updater(new NoOp())
                            .weightInit(WeightInit.XAVIER).seed(12345L).list()
                            .layer(0, new ConvolutionLayer.Builder(1, 1).nOut(6).activation(afn).build())
                            .layer(1, new OutputLayer.Builder(lf).activation(outputActivation).nOut(3).build())
                            .setInputType(InputType.convolutionalFlat(1, 4, 1));

                    MultiLayerConfiguration conf = builder.build();

                    MultiLayerNetwork mln = new MultiLayerNetwork(conf);
                    mln.init();
                    String name = new Object() {}.getClass().getEnclosingMethod().getName();

                    if (doLearningFirst) {
                        //Run a number of iterations of learning
                        mln.setInput(ds.getFeatures());
                        mln.setLabels(ds.getLabels());
                        mln.computeGradientAndScore();
                        double scoreBefore = mln.score();
                        for (int j = 0; j < 10; j++)
                            mln.fit(ds);
                        mln.computeGradientAndScore();
                        double scoreAfter = mln.score();
                        //Can't test in 'characteristic mode of operation' if not learning
                        String msg = name + " - score did not (sufficiently) decrease during learning - activationFn="
                                + afn + ", lossFn=" + lf + ", outputActivation=" + outputActivation
                                + ", doLearningFirst= " + doLearningFirst + " (before=" + scoreBefore
                                + ", scoreAfter=" + scoreAfter + ")";
                        assertTrue(msg, scoreAfter < 0.9 * scoreBefore);
                    }

                    if (PRINT_RESULTS) {
                        System.out.println(name + " - activationFn=" + afn + ", lossFn=" + lf + ", outputActivation="
                                + outputActivation + ", doLearningFirst=" + doLearningFirst);
//                        for (int j = 0; j < mln.getnLayers(); j++)
//                            System.out.println("Layer " + j + " # params: " + mln.getLayer(j).numParams());
                    }

                    boolean gradOK = GradientCheckUtil.checkGradients(mln, DEFAULT_EPS, DEFAULT_MAX_REL_ERROR,
                            DEFAULT_MIN_ABS_ERROR, PRINT_RESULTS, RETURN_ON_FIRST_FAILURE, input, labels);

                    assertTrue(gradOK);
                    TestUtils.testModelSerialization(mln);
                }
            }
        }
    }
 
Example #22
Source File: CNN1DGradientCheckTest.java    From deeplearning4j with Apache License 2.0
@Test
    public void testCnn1DWithCropping1D() {
        Nd4j.getRandom().setSeed(1337);

        int[] minibatchSizes = {1, 3};
        int length = 7;
        int convNIn = 2;
        int convNOut1 = 3;
        int convNOut2 = 4;
        int finalNOut = 4;


        int[] kernels = {1, 2, 4};
        int stride = 1;

        int padding = 0;
        int cropping = 1;
        int croppedLength = length - 2 * cropping;

        Activation[] activations = {Activation.SIGMOID};
        SubsamplingLayer.PoolingType[] poolingTypes =
                new SubsamplingLayer.PoolingType[]{SubsamplingLayer.PoolingType.MAX,
                        SubsamplingLayer.PoolingType.AVG, SubsamplingLayer.PoolingType.PNORM};

        for (Activation afn : activations) {
            for (SubsamplingLayer.PoolingType poolingType : poolingTypes) {
                for (int minibatchSize : minibatchSizes) {
                    for (int kernel : kernels) {
                        INDArray input = Nd4j.rand(new int[]{minibatchSize, convNIn, length});
                        INDArray labels = Nd4j.zeros(minibatchSize, finalNOut, croppedLength);
                        for (int i = 0; i < minibatchSize; i++) {
                            for (int j = 0; j < croppedLength; j++) {
                                labels.putScalar(new int[]{i, i % finalNOut, j}, 1.0);
                            }
                        }

                        MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder()
                                .dataType(DataType.DOUBLE)
                                .updater(new NoOp())
                                .dist(new NormalDistribution(0, 1)).convolutionMode(ConvolutionMode.Same).list()
                                .layer(new Convolution1DLayer.Builder().activation(afn).kernelSize(kernel)
                                        .stride(stride).padding(padding).nIn(convNIn).nOut(convNOut1)
                                        .build())
                                .layer(new Cropping1D.Builder(cropping).build())
                                .layer(new Convolution1DLayer.Builder().activation(afn).kernelSize(kernel)
                                        .stride(stride).padding(padding).nIn(convNOut1).nOut(convNOut2)
                                        .build())
                                .layer(new RnnOutputLayer.Builder(LossFunctions.LossFunction.MCXENT)
                                        .activation(Activation.SOFTMAX).nOut(finalNOut).build())
                                .setInputType(InputType.recurrent(convNIn, length)).build();

                        String json = conf.toJson();
                        MultiLayerConfiguration c2 = MultiLayerConfiguration.fromJson(json);
                        assertEquals(conf, c2);

                        MultiLayerNetwork net = new MultiLayerNetwork(conf);
                        net.init();

                        String msg = "PoolingType=" + poolingType + ", minibatch=" + minibatchSize + ", activationFn="
                                + afn + ", kernel = " + kernel;

                        if (PRINT_RESULTS) {
                            System.out.println(msg);
//                            for (int j = 0; j < net.getnLayers(); j++)
//                                System.out.println("Layer " + j + " # params: " + net.getLayer(j).numParams());
                        }

                        boolean gradOK = GradientCheckUtil.checkGradients(net, DEFAULT_EPS, DEFAULT_MAX_REL_ERROR,
                                DEFAULT_MIN_ABS_ERROR, PRINT_RESULTS, RETURN_ON_FIRST_FAILURE, input, labels);

                        assertTrue(msg, gradOK);

                        TestUtils.testModelSerialization(net);
                    }
                }
            }
        }
    }
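
The labels in the example above are sized against croppedLength because Cropping1D trims cropping time steps from each end of the sequence. A minimal sketch of that arithmetic follows; the class and method names are hypothetical, not part of the test.

// Hypothetical helper illustrating the cropped sequence length used above.
public class Cropping1DLengthSketch {
    static int croppedLength(int length, int cropping) {
        return length - 2 * cropping; // trim 'cropping' steps from each end
    }

    public static void main(String[] args) {
        System.out.println(croppedLength(7, 1)); // 5, matching croppedLength in the test
    }
}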
 
Example #23
Source File: CNNGradientCheckTest.java    From deeplearning4j with Apache License 2.0 4 votes vote down vote up
@Test
public void testCnnLocallyConnected2D() {
    int nOut = 3;
    int width = 5;
    int height = 5;

    Nd4j.getRandom().setSeed(12345);

    int[] inputDepths = new int[]{1, 2, 4};
    Activation[] activations = {Activation.SIGMOID, Activation.TANH, Activation.SOFTPLUS};
    int[] minibatch = {2, 1, 3};

    boolean nchw = format == CNN2DFormat.NCHW;

    for( int i=0; i<inputDepths.length; i++ ){
        int inputDepth = inputDepths[i];
        Activation afn = activations[i];
        int minibatchSize = minibatch[i];

        long[] inShape = nchw ? new long[]{minibatchSize, inputDepth, height, width} : new long[]{minibatchSize, height, width, inputDepth};
        INDArray input = Nd4j.rand(DataType.DOUBLE, inShape);
        INDArray labels = TestUtils.randomOneHot(minibatchSize, nOut);

        MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder().seed(12345).updater(new NoOp())
                .dataType(DataType.DOUBLE)
                .activation(afn)
                .list()
                .layer(0, new ConvolutionLayer.Builder().kernelSize(2, 2).stride(1, 1)
                        .padding(0, 0).nIn(inputDepth).nOut(2).build())//output: (5-2+0)/1+1 = 4
                .layer(1, new LocallyConnected2D.Builder().nIn(2).nOut(7).kernelSize(2, 2)
                        .setInputSize(4, 4).convolutionMode(ConvolutionMode.Strict).hasBias(false)
                        .stride(1, 1).padding(0, 0).build()) //(4-2+0)/1+1 = 3
                .layer(2, new ConvolutionLayer.Builder().nIn(7).nOut(2).kernelSize(2, 2)
                        .stride(1, 1).padding(0, 0).build()) //(3-2+0)/1+1 = 2
                .layer(3, new OutputLayer.Builder(LossFunctions.LossFunction.MCXENT)
                        .activation(Activation.SOFTMAX).nIn(2 * 2 * 2).nOut(nOut)
                        .build())
                .setInputType(InputType.convolutional(height, width, inputDepth, format)).build();

        assertEquals(ConvolutionMode.Truncate,
                ((ConvolutionLayer) conf.getConf(0).getLayer()).getConvolutionMode());

        MultiLayerNetwork net = new MultiLayerNetwork(conf);
        net.init();

        String msg = "Minibatch=" + minibatchSize + ", activationFn="
                + afn;
        System.out.println(msg);

        boolean gradOK = GradientCheckUtil.checkGradients(net, DEFAULT_EPS, DEFAULT_MAX_REL_ERROR,
                DEFAULT_MIN_ABS_ERROR, PRINT_RESULTS, RETURN_ON_FIRST_FAILURE, input, labels);

        assertTrue(msg, gradOK);

        TestUtils.testModelSerialization(net);
    }
}
 
Example #24
Source File: CNNGradientCheckTest.java    From deeplearning4j with Apache License 2.0 4 votes vote down vote up
@Test
public void testCnnWithSubsamplingV2() {
    Nd4j.getRandom().setSeed(12345);
    int nOut = 4;

    int[] minibatchSizes = {1, 3};
    int width = 5;
    int height = 5;
    int inputDepth = 1;

    int[] kernel = {2, 2};
    int[] stride = {1, 1};
    int[] padding = {0, 0};
    int pNorm = 3;

    Activation[] activations = {Activation.SIGMOID, Activation.TANH};
    SubsamplingLayer.PoolingType[] poolingTypes =
            new SubsamplingLayer.PoolingType[]{SubsamplingLayer.PoolingType.MAX,
                    SubsamplingLayer.PoolingType.AVG, SubsamplingLayer.PoolingType.PNORM};

    boolean nchw = format == CNN2DFormat.NCHW;

    for (Activation afn : activations) {
        for (SubsamplingLayer.PoolingType poolingType : poolingTypes) {
            for (int minibatchSize : minibatchSizes) {
                long[] inShape = nchw ? new long[]{minibatchSize, inputDepth, height, width} : new long[]{minibatchSize, height, width, inputDepth};
                INDArray input = Nd4j.rand(DataType.DOUBLE, inShape);
                INDArray labels = Nd4j.zeros(minibatchSize, nOut);
                for (int i = 0; i < minibatchSize; i++) {
                    labels.putScalar(new int[]{i, i % nOut}, 1.0);
                }

                MultiLayerConfiguration conf =
                        new NeuralNetConfiguration.Builder().updater(new NoOp())
                                .dataType(DataType.DOUBLE)
                                .dist(new NormalDistribution(0, 1))
                                .list().layer(0,
                                new ConvolutionLayer.Builder(kernel,
                                        stride, padding).nIn(inputDepth)
                                        .nOut(3).build())//output: (5-2+0)/1+1 = 4
                                .layer(1, new SubsamplingLayer.Builder(poolingType)
                                        .kernelSize(kernel).stride(stride).padding(padding)
                                        .pnorm(pNorm).build()) //output: (4-2+0)/1+1 =3 -> 3x3x3
                                .layer(2, new ConvolutionLayer.Builder(kernel, stride, padding)
                                        .nIn(3).nOut(2).build()) //Output: (3-2+0)/1+1 = 2
                                .layer(3, new OutputLayer.Builder(LossFunctions.LossFunction.MCXENT)
                                        .activation(Activation.SOFTMAX).nIn(2 * 2 * 2)
                                        .nOut(4).build())
                                .setInputType(InputType.convolutional(height, width, inputDepth, format))
                                .build();

                MultiLayerNetwork net = new MultiLayerNetwork(conf);
                net.init();

                String msg = "PoolingType=" + poolingType + ", minibatch=" + minibatchSize + ", activationFn="
                        + afn;
                System.out.println(msg);

                boolean gradOK = GradientCheckUtil.checkGradients(net, DEFAULT_EPS, DEFAULT_MAX_REL_ERROR,
                        DEFAULT_MIN_ABS_ERROR, PRINT_RESULTS, RETURN_ON_FIRST_FAILURE, input, labels);

                assertTrue(msg, gradOK);

                TestUtils.testModelSerialization(net);
            }
        }
    }
}
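
The "(5-2+0)/1+1 = 4" style comments in this and the surrounding examples all come from the standard output-size formula for convolution and subsampling layers with explicit padding. A small sketch of that formula is below; the helper class is hypothetical and not part of the test sources.

// Hypothetical helper mirroring the output-size comments above
// (explicit padding / ConvolutionMode.Truncate).
public class ConvOutputSizeSketch {
    static int outSize(int inSize, int kernel, int stride, int padding) {
        return (inSize - kernel + 2 * padding) / stride + 1;
    }

    public static void main(String[] args) {
        System.out.println(outSize(5, 2, 1, 0)); // 4: after the first convolution
        System.out.println(outSize(4, 2, 1, 0)); // 3: after the subsampling layer
        System.out.println(outSize(3, 2, 1, 0)); // 2: after the second convolution
    }
}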
 
Example #25
Source File: CNNGradientCheckTest.java    From deeplearning4j with Apache License 2.0 4 votes vote down vote up
@Test
    public void testCnnWithSubsampling() {
        Nd4j.getRandom().setSeed(12345);
        int nOut = 4;

        int[] minibatchSizes = {1, 3};
        int width = 5;
        int height = 5;
        int inputDepth = 1;

        int[] kernel = {2, 2};
        int[] stride = {1, 1};
        int[] padding = {0, 0};
        int pnorm = 2;

        Activation[] activations = {Activation.SIGMOID, Activation.TANH};
        SubsamplingLayer.PoolingType[] poolingTypes =
                new SubsamplingLayer.PoolingType[]{SubsamplingLayer.PoolingType.MAX,
                        SubsamplingLayer.PoolingType.AVG, SubsamplingLayer.PoolingType.PNORM};

        boolean nchw = format == CNN2DFormat.NCHW;

        for (Activation afn : activations) {
            for (SubsamplingLayer.PoolingType poolingType : poolingTypes) {
                for (int minibatchSize : minibatchSizes) {
                    long[] inShape = nchw ? new long[]{minibatchSize, inputDepth, height, width} : new long[]{minibatchSize, height, width, inputDepth};
                    INDArray input = Nd4j.rand(DataType.DOUBLE, inShape);
                    INDArray labels = Nd4j.zeros(minibatchSize, nOut);
                    for (int i = 0; i < minibatchSize; i++) {
                        labels.putScalar(new int[]{i, i % nOut}, 1.0);
                    }

                    MultiLayerConfiguration conf =
                            new NeuralNetConfiguration.Builder().updater(new NoOp())
                                    .dataType(DataType.DOUBLE)
                                    .dist(new NormalDistribution(0, 1))
                                    .list().layer(0,
                                    new ConvolutionLayer.Builder(kernel,
                                            stride, padding).nIn(inputDepth)
                                            .nOut(3).build())//output: (5-2+0)/1+1 = 4
                                    .layer(1, new SubsamplingLayer.Builder(poolingType)
                                            .kernelSize(kernel).stride(stride).padding(padding)
                                            .pnorm(pnorm).build()) //output: (4-2+0)/1+1 =3 -> 3x3x3
                                    .layer(2, new OutputLayer.Builder(LossFunctions.LossFunction.MCXENT)
                                            .activation(Activation.SOFTMAX).nIn(3 * 3 * 3)
                                            .nOut(4).build())
                                    .setInputType(InputType.convolutional(height, width, inputDepth, format))
                                    .build();

                    MultiLayerNetwork net = new MultiLayerNetwork(conf);
                    net.init();

                    String msg = format + " - poolingType=" + poolingType + ", minibatch=" + minibatchSize + ", activationFn="
                            + afn;

                    if (PRINT_RESULTS) {
                        System.out.println(msg);
//                        for (int j = 0; j < net.getnLayers(); j++)
//                            System.out.println("Layer " + j + " # params: " + net.getLayer(j).numParams());
                    }

                    boolean gradOK = GradientCheckUtil.checkGradients(net, DEFAULT_EPS, DEFAULT_MAX_REL_ERROR,
                            DEFAULT_MIN_ABS_ERROR, PRINT_RESULTS, RETURN_ON_FIRST_FAILURE, input, labels);

                    assertTrue(msg, gradOK);

                    TestUtils.testModelSerialization(net);
                }
            }
        }
    }
 
Example #26
Source File: CNNGradientCheckTest.java    From deeplearning4j with Apache License 2.0 4 votes vote down vote up
@Test
    public void testCnnWithUpsampling() {
        Nd4j.getRandom().setSeed(12345);
        int nOut = 4;

        int[] minibatchSizes = {1, 3};
        int width = 5;
        int height = 5;
        int inputDepth = 1;

        int[] kernel = {2, 2};
        int[] stride = {1, 1};
        int[] padding = {0, 0};
        int size = 2;

        boolean nchw = format == CNN2DFormat.NCHW;

        for (int minibatchSize : minibatchSizes) {
            long[] inShape = nchw ? new long[]{minibatchSize, inputDepth, height, width} : new long[]{minibatchSize, height, width, inputDepth};
            INDArray input = Nd4j.rand(DataType.DOUBLE, inShape);
            INDArray labels = TestUtils.randomOneHot(minibatchSize, nOut);

            MultiLayerConfiguration conf =
                    new NeuralNetConfiguration.Builder()
                            .dataType(DataType.DOUBLE)
                            .updater(new NoOp())
                            .dist(new NormalDistribution(0, 1))
                            .list().layer(new ConvolutionLayer.Builder(kernel,
                            stride, padding).nIn(inputDepth)
                            .nOut(3).build())//output: (5-2+0)/1+1 = 4
                            .layer(new Upsampling2D.Builder().size(size).build()) //output: 4*2 =8 -> 8x8x3
                            .layer(new OutputLayer.Builder(LossFunctions.LossFunction.MCXENT)
                                    .activation(Activation.SOFTMAX).nIn(8 * 8 * 3)
                                    .nOut(4).build())
                            .setInputType(InputType.convolutional(height, width, inputDepth, format))
                            .build();

            MultiLayerNetwork net = new MultiLayerNetwork(conf);
            net.init();

            String msg = "Upsampling - minibatch=" + minibatchSize;

            if (PRINT_RESULTS) {
                System.out.println(msg);
//                for (int j = 0; j < net.getnLayers(); j++)
//                    System.out.println("Layer " + j + " # params: " + net.getLayer(j).numParams());
            }

            boolean gradOK = GradientCheckUtil.checkGradients(net, DEFAULT_EPS, DEFAULT_MAX_REL_ERROR,
                    DEFAULT_MIN_ABS_ERROR, PRINT_RESULTS, RETURN_ON_FIRST_FAILURE, input, labels);

            assertTrue(msg, gradOK);

            TestUtils.testModelSerialization(net);
        }
    }
 
Example #27
Source File: CNNGradientCheckTest.java    From deeplearning4j with Apache License 2.0 4 votes vote down vote up
@Test
    public void testCnnWithSpaceToBatch() {
        Nd4j.getRandom().setSeed(12345);
        int nOut = 4;

        int[] minibatchSizes = {2, 4};
        int width = 5;
        int height = 5;
        int inputDepth = 1;

        int[] kernel = {2, 2};
        int[] blocks = {2, 2};

        String[] activations = {"sigmoid", "tanh"};
        SubsamplingLayer.PoolingType[] poolingTypes =
                new SubsamplingLayer.PoolingType[]{SubsamplingLayer.PoolingType.MAX,
                        SubsamplingLayer.PoolingType.AVG, SubsamplingLayer.PoolingType.PNORM};

        boolean nchw = format == CNN2DFormat.NCHW;
        for (String afn : activations) {
            for (SubsamplingLayer.PoolingType poolingType : poolingTypes) {
                for (int minibatchSize : minibatchSizes) {
                    long[] inShape = nchw ? new long[]{minibatchSize, inputDepth, height, width} : new long[]{minibatchSize, height, width, inputDepth};
                    INDArray input = Nd4j.rand(DataType.DOUBLE, inShape);
                    INDArray labels = Nd4j.zeros(4 * minibatchSize, nOut);
                    for (int i = 0; i < 4 * minibatchSize; i++) {
                        labels.putScalar(new int[]{i, i % nOut}, 1.0);
                    }

                    MultiLayerConfiguration conf =
                            new NeuralNetConfiguration.Builder()
                                    .dataType(DataType.DOUBLE)
                                    .updater(new NoOp()).weightInit(new NormalDistribution(0, 1))
                                    .list()
                                    .layer(new ConvolutionLayer.Builder(kernel).nIn(inputDepth).nOut(3).build())
                                    .layer(new SpaceToBatchLayer.Builder(blocks).build()) //trivial space to batch
                                    .layer(new OutputLayer.Builder(LossFunctions.LossFunction.MCXENT)
                                            .activation(Activation.SOFTMAX)
                                            .nOut(nOut).build())
                                    .setInputType(InputType.convolutional(height, width, inputDepth, format))
                                    .build();

                    MultiLayerNetwork net = new MultiLayerNetwork(conf);
                    net.init();

                    String msg = format + " - poolingType=" + poolingType + ", minibatch=" + minibatchSize + ", activationFn="
                            + afn;

                    if (PRINT_RESULTS) {
                        System.out.println(msg);
//                        for (int j = 0; j < net.getnLayers(); j++)
//                            System.out.println("Layer " + j + " # params: " + net.getLayer(j).numParams());
                    }
                    boolean gradOK = GradientCheckUtil.checkGradients(net, DEFAULT_EPS, DEFAULT_MAX_REL_ERROR,
                            DEFAULT_MIN_ABS_ERROR, PRINT_RESULTS, RETURN_ON_FIRST_FAILURE, input, labels);

                    assertTrue(msg, gradOK);

                    //Also check compgraph:
                    ComputationGraph cg = net.toComputationGraph();
                    gradOK = GradientCheckUtil.checkGradients(new GradientCheckUtil.GraphConfig().net(cg).inputs(new INDArray[]{input})
                            .labels(new INDArray[]{labels}));
                    assertTrue(msg + " - compgraph", gradOK);

                    TestUtils.testModelSerialization(net);
                }
            }
        }
    }
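
In the example above the label array is created with 4 * minibatchSize rows because SpaceToBatch moves spatial blocks into the batch dimension, multiplying the effective minibatch by blocks[0] * blocks[1] (here 2 * 2 = 4). A short sketch of that relationship follows; the helper is hypothetical.

// Hypothetical sketch of the batch-size change introduced by SpaceToBatchLayer.
public class SpaceToBatchSizeSketch {
    static long outputBatchSize(long minibatch, int[] blocks) {
        return minibatch * blocks[0] * blocks[1];
    }

    public static void main(String[] args) {
        System.out.println(outputBatchSize(2, new int[]{2, 2})); // 8 = 4 * minibatchSize for minibatchSize = 2
    }
}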
 
Example #28
Source File: CNNGradientCheckTest.java    From deeplearning4j with Apache License 2.0 4 votes vote down vote up
@Ignore
    @Test
    public void testCnnWithSpaceToDepth() {
        Nd4j.getRandom().setSeed(12345);
        int nOut = 4;
        int minibatchSize = 2;

        int width = 5;
        int height = 5;
        int inputDepth = 1;

        int[] kernel = {2, 2};
        int blocks = 2;

        String[] activations = {"sigmoid"};
        SubsamplingLayer.PoolingType[] poolingTypes =
                new SubsamplingLayer.PoolingType[]{SubsamplingLayer.PoolingType.MAX,
                        SubsamplingLayer.PoolingType.AVG, SubsamplingLayer.PoolingType.PNORM};

        for (String afn : activations) {
            for (SubsamplingLayer.PoolingType poolingType : poolingTypes) {
                INDArray input = Nd4j.rand(minibatchSize, width * height * inputDepth);
                INDArray labels = Nd4j.zeros(minibatchSize, nOut);
                for (int i = 0; i < minibatchSize; i++) {
                    labels.putScalar(new int[]{i, i % nOut}, 1.0);
                }

                MultiLayerConfiguration conf =
                        new NeuralNetConfiguration.Builder()
                                .dataType(DataType.DOUBLE)
                                .updater(new NoOp())
                                .dist(new NormalDistribution(0, 1))
                                .list().layer(new ConvolutionLayer.Builder(kernel).nIn(inputDepth).hasBias(false)
                                .nOut(1).build()) //output: (5-2+0)/1+1 = 4
                                .layer(new SpaceToDepthLayer.Builder(blocks, SpaceToDepthLayer.DataFormat.NCHW)
                                        .build()) // (mb,1,4,4) -> (mb,4,2,2)
                                .layer(new OutputLayer.Builder(LossFunctions.LossFunction.MCXENT)
                                        .activation(Activation.SOFTMAX).nIn(2 * 2 * 4)
                                        .nOut(nOut).build())
                                .setInputType(InputType.convolutionalFlat(height, width, inputDepth))
                                .build();

                MultiLayerNetwork net = new MultiLayerNetwork(conf);
                net.init();

                String msg = "PoolingType=" + poolingType + ", minibatch=" + minibatchSize + ", activationFn="
                        + afn;

                if (PRINT_RESULTS) {
                    System.out.println(msg);
//                    for (int j = 0; j < net.getnLayers(); j++)
//                        System.out.println("Layer " + j + " # params: " + net.getLayer(j).numParams());
                }
                boolean gradOK = GradientCheckUtil.checkGradients(net, DEFAULT_EPS, DEFAULT_MAX_REL_ERROR,
                        DEFAULT_MIN_ABS_ERROR, PRINT_RESULTS, RETURN_ON_FIRST_FAILURE, input, labels);

                assertTrue(msg, gradOK);

                TestUtils.testModelSerialization(net);
            }
        }
    }
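
The "(mb,1,4,4) -> (mb,4,2,2)" comment above follows from how space-to-depth rearranges activations: spatial dimensions are divided by the block size and channels are multiplied by the square of the block size. A sketch of that shape change is below; the helper class is hypothetical and assumes NCHW layout.

import java.util.Arrays;

// Hypothetical sketch of the SpaceToDepthLayer shape change noted above (NCHW layout).
public class SpaceToDepthShapeSketch {
    static long[] outputShape(long[] nchw, int blocks) {
        return new long[]{
                nchw[0],                    // minibatch unchanged
                nchw[1] * blocks * blocks,  // channels: 1 -> 4
                nchw[2] / blocks,           // height: 4 -> 2
                nchw[3] / blocks            // width: 4 -> 2
        };
    }

    public static void main(String[] args) {
        System.out.println(Arrays.toString(outputShape(new long[]{2, 1, 4, 4}, 2))); // [2, 4, 2, 2]
    }
}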
 
Example #29
Source File: CNNGradientCheckTest.java    From deeplearning4j with Apache License 2.0 4 votes vote down vote up
@Test
    public void testGradientCNNL1L2MLN() {
        if(this.format != CNN2DFormat.NCHW) //Only test NCHW due to flat input format...
            return;

        //Parameterized test, testing combinations of:
        // (a) activation function
        // (b) Whether to test at random initialization, or after some learning (i.e., 'characteristic mode of operation')
        // (c) Loss function (with specified output activations)

        DataSet ds = new IrisDataSetIterator(150, 150).next();
        ds.normalizeZeroMeanZeroUnitVariance();
        INDArray input = ds.getFeatures();
        INDArray labels = ds.getLabels();

        //use l2vals[i] with l1vals[i]
        double[] l2vals = {0.4, 0.0, 0.4, 0.4};
        double[] l1vals = {0.0, 0.0, 0.5, 0.0};
        double[] biasL2 = {0.0, 0.0, 0.0, 0.2};
        double[] biasL1 = {0.0, 0.0, 0.6, 0.0};
        Activation[] activFns = {Activation.SIGMOID, Activation.TANH, Activation.ELU, Activation.SOFTPLUS};
        boolean[] characteristic = {false, true, false, true}; //If true: run some backprop steps first

        LossFunctions.LossFunction[] lossFunctions =
                {LossFunctions.LossFunction.NEGATIVELOGLIKELIHOOD, LossFunctions.LossFunction.MSE, LossFunctions.LossFunction.NEGATIVELOGLIKELIHOOD, LossFunctions.LossFunction.MSE};
        Activation[] outputActivations = {Activation.SOFTMAX, Activation.TANH, Activation.SOFTMAX, Activation.IDENTITY}; //i.e., lossFunctions[i] used with outputActivations[i] here

        for( int i=0; i<l2vals.length; i++ ){
            Activation afn = activFns[i];
            boolean doLearningFirst = characteristic[i];
            LossFunctions.LossFunction lf = lossFunctions[i];
            Activation outputActivation = outputActivations[i];
            double l2 = l2vals[i];
            double l1 = l1vals[i];

            MultiLayerConfiguration.Builder builder = new NeuralNetConfiguration.Builder()
                    .dataType(DataType.DOUBLE)
                    .l2(l2).l1(l1).l2Bias(biasL2[i]).l1Bias(biasL1[i])
                    .optimizationAlgo(
                            OptimizationAlgorithm.CONJUGATE_GRADIENT)
                    .seed(12345L).list()
                    .layer(0, new ConvolutionLayer.Builder(new int[]{1, 1}).nIn(1).nOut(6)
                            .weightInit(WeightInit.XAVIER).activation(afn)
                            .updater(new NoOp()).build())
                    .layer(1, new OutputLayer.Builder(lf).activation(outputActivation).nOut(3)
                            .weightInit(WeightInit.XAVIER).updater(new NoOp()).build())

                    .setInputType(InputType.convolutionalFlat(1, 4, 1));

            MultiLayerConfiguration conf = builder.build();

            MultiLayerNetwork mln = new MultiLayerNetwork(conf);
            mln.init();
            String testName = new Object() {}.getClass().getEnclosingMethod().getName();

            if (doLearningFirst) {
                //Run a number of iterations of learning
                mln.setInput(ds.getFeatures());
                mln.setLabels(ds.getLabels());
                mln.computeGradientAndScore();
                double scoreBefore = mln.score();
                for (int j = 0; j < 10; j++)
                    mln.fit(ds);
                mln.computeGradientAndScore();
                double scoreAfter = mln.score();
                //Can't test in 'characteristic mode of operation' if not learning
                String msg = testName
                        + "- score did not (sufficiently) decrease during learning - activationFn="
                        + afn + ", lossFn=" + lf + ", outputActivation=" + outputActivation
                        + ", doLearningFirst=" + doLearningFirst + " (before=" + scoreBefore
                        + ", scoreAfter=" + scoreAfter + ")";
                assertTrue(msg, scoreAfter < 0.8 * scoreBefore);
            }

            if (PRINT_RESULTS) {
                System.out.println(testName + "- activationFn=" + afn + ", lossFn=" + lf
                        + ", outputActivation=" + outputActivation + ", doLearningFirst="
                        + doLearningFirst);
//                for (int j = 0; j < mln.getnLayers(); j++)
//                    System.out.println("Layer " + j + " # params: " + mln.getLayer(j).numParams());
            }

            boolean gradOK = GradientCheckUtil.checkGradients(mln, DEFAULT_EPS, DEFAULT_MAX_REL_ERROR,
                    DEFAULT_MIN_ABS_ERROR, PRINT_RESULTS, RETURN_ON_FIRST_FAILURE, input, labels);

            assertTrue(gradOK);
            TestUtils.testModelSerialization(mln);
        }
    }
 
Example #30
Source File: RnnGradientChecks.java    From deeplearning4j with Apache License 2.0 4 votes vote down vote up
@Test
@Ignore("AB 2019/06/24 - Ignored to get to all passing baseline to prevent regressions via CI - see issue #7912")
public void testSimpleRnn() {
    int nOut = 5;

    double[] l1s = new double[]{0.0, 0.4};
    double[] l2s = new double[]{0.0, 0.6};

    Random r = new Random(12345);
    for (int mb : new int[]{1, 3}) {
        for (int tsLength : new int[]{1, 4}) {
            for (int nIn : new int[]{3, 1}) {
                for (int layerSize : new int[]{4, 1}) {
                    for (boolean inputMask : new boolean[]{false, true}) {
                        for (boolean hasLayerNorm : new boolean[]{true, false}) {
                            for (int l = 0; l < l1s.length; l++) {
                                //Only run 1 of 5 (on average - note RNG seed for deterministic testing) - 25 of 128 test cases (to minimize test time)
                                if(r.nextInt(5) != 0)
                                    continue;

                                INDArray in = Nd4j.rand(new int[]{mb, nIn, tsLength});
                                INDArray labels = Nd4j.create(mb, nOut, tsLength);
                                for (int i = 0; i < mb; i++) {
                                    for (int j = 0; j < tsLength; j++) {
                                        labels.putScalar(i, r.nextInt(nOut), j, 1.0);
                                    }
                                }
                                String maskType = (inputMask ? "inputMask" : "none");

                                INDArray inMask = null;
                                if (inputMask) {
                                    inMask = Nd4j.ones(mb, tsLength);
                                    for (int i = 0; i < mb; i++) {
                                        int firstMaskedStep = tsLength - 1 - i;
                                        if (firstMaskedStep == 0) {
                                            firstMaskedStep = tsLength;
                                        }
                                        for (int j = firstMaskedStep; j < tsLength; j++) {
                                            inMask.putScalar(i, j, 0.0);
                                        }
                                    }
                                }

                                String name = "testSimpleRnn() - mb=" + mb + ", tsLength = " + tsLength + ", maskType=" +
                                        maskType + ", l1=" + l1s[l] + ", l2=" + l2s[l] + ", hasLayerNorm=" + hasLayerNorm;

                                System.out.println("Starting test: " + name);

                                MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder()
                                        .dataType(DataType.DOUBLE)
                                        .updater(new NoOp())
                                        .weightInit(WeightInit.XAVIER)
                                        .activation(Activation.TANH)
                                        .l1(l1s[l])
                                        .l2(l2s[l])
                                        .list()
                                        .layer(new SimpleRnn.Builder().nIn(nIn).nOut(layerSize).hasLayerNorm(hasLayerNorm).build())
                                        .layer(new SimpleRnn.Builder().nIn(layerSize).nOut(layerSize).hasLayerNorm(hasLayerNorm).build())
                                        .layer(new RnnOutputLayer.Builder().nIn(layerSize).nOut(nOut)
                                                .activation(Activation.SOFTMAX).lossFunction(LossFunctions.LossFunction.MCXENT)
                                                .build())
                                        .build();

                                MultiLayerNetwork net = new MultiLayerNetwork(conf);
                                net.init();


                                boolean gradOK = GradientCheckUtil.checkGradients(new GradientCheckUtil.MLNConfig().net(net).input(in)
                                        .labels(labels).inputMask(inMask));
                                assertTrue(gradOK);
                                TestUtils.testModelSerialization(net);
                            }
                        }
                    }
                }
            }
        }
    }
}
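
As a closing note on the masking loop in testSimpleRnn() above, the sketch below reproduces it standalone so the resulting mask is easy to inspect; for mb = 3 and tsLength = 4 it yields rows [1,1,1,0], [1,1,0,0] and [1,0,0,0]. The class name is illustrative only.

import org.nd4j.linalg.api.ndarray.INDArray;
import org.nd4j.linalg.factory.Nd4j;

// Standalone reproduction of the input-mask construction used in testSimpleRnn().
public class RnnMaskSketch {
    public static void main(String[] args) {
        int mb = 3;
        int tsLength = 4;
        INDArray inMask = Nd4j.ones(mb, tsLength);
        for (int i = 0; i < mb; i++) {
            int firstMaskedStep = tsLength - 1 - i;
            if (firstMaskedStep == 0) {
                firstMaskedStep = tsLength; // leave this row fully unmasked
            }
            for (int j = firstMaskedStep; j < tsLength; j++) {
                inMask.putScalar(i, j, 0.0); // mask the trailing time steps
            }
        }
        System.out.println(inMask); // [[1,1,1,0], [1,1,0,0], [1,0,0,0]]
    }
}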