org.nd4j.linalg.api.rng.DefaultRandom Java Examples

The following examples show how to use org.nd4j.linalg.api.rng.DefaultRandom. Each example lists the source file and open-source project it was taken from, along with that project's license.
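
Every example follows the same pattern: construct a DefaultRandom (optionally with a fixed seed), pass it to one of the org.nd4j.linalg.api.rng.distribution.impl distribution classes, and call sample with the desired shape. A minimal sketch of that pattern; the shape and parameter values here are illustrative, not taken from any of the source files below:

// A fixed seed makes the generated samples reproducible across runs.
Random rng = new DefaultRandom(12345L);

// NormalDistribution, UniformDistribution, and BinomialDistribution all
// take a Random as their first constructor argument.
Distribution dist = new org.nd4j.linalg.api.rng.distribution.impl.NormalDistribution(rng, 0.0, 1.0);

// sample(int[]) returns an INDArray with the requested shape.
INDArray samples = dist.sample(new int[] {1, 1000});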
Example #1
Source File: RandomTests.java    From nd4j with Apache License 2.0
@Test
public void testLegacyDistribution1() throws Exception {
    NormalDistribution distribution = new NormalDistribution(new DefaultRandom(), 0.0, 1.0);
    INDArray z1 = distribution.sample(new int[] {1, 30000000});

    // With 30M samples from N(0, 1), the empirical mean and stddev
    // should match the distribution parameters to within 0.01.
    assertEquals(0.0, z1.meanNumber().doubleValue(), 0.01);
    assertEquals(1.0, z1.stdNumber().doubleValue(), 0.01);
}
 
Example #2
Source File: RandomTests.java    From deeplearning4j with Apache License 2.0
@Test
public void testLegacyDistribution1() {
    NormalDistribution distribution = new NormalDistribution(new DefaultRandom(), 0.0, 1.0);
    INDArray z1 = distribution.sample(new int[] {1, 1000000});

    assertEquals(0.0, z1.meanNumber().doubleValue(), 0.01);
    assertEquals(1.0, z1.stdNumber().doubleValue(), 0.01);
}
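
Examples #1 and #2 build DefaultRandom without a seed; the remaining examples pass an explicit seed. A small sketch of why the seeded form matters in tests (this test is illustrative, not from the source files above): two generators created with the same seed drive a distribution through the same sequence, so the sampled arrays should be element-wise equal.

@Test
public void testSeededSamplingIsReproducible() {
    Random r1 = new DefaultRandom(42L);
    Random r2 = new DefaultRandom(42L);
    org.nd4j.linalg.api.rng.distribution.Distribution d1 =
                    new org.nd4j.linalg.api.rng.distribution.impl.UniformDistribution(r1, -1.0, 1.0);
    org.nd4j.linalg.api.rng.distribution.Distribution d2 =
                    new org.nd4j.linalg.api.rng.distribution.impl.UniformDistribution(r2, -1.0, 1.0);

    // Same seed, same draw order: the two sample arrays should match.
    assertEquals(d1.sample(new int[] {1, 100}), d2.sample(new int[] {1, 100}));
}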
 
Example #3
Source File: TestOptimizers.java    From deeplearning4j with Apache License 2.0
private static void testSphereFnMultipleStepsHelper(OptimizationAlgorithm oa, int nOptIter,
                int maxNumLineSearchIter) {
    double[] scores = new double[nOptIter + 1];

    for (int i = 0; i <= nOptIter; i++) {
        // Fixed seed: the model below starts from identical parameters on
        // every pass through this loop.
        Random rng = new DefaultRandom(12345L);
        org.nd4j.linalg.api.rng.distribution.Distribution dist =
                        new org.nd4j.linalg.api.rng.distribution.impl.UniformDistribution(rng, -10, 10);
        NeuralNetConfiguration conf = new NeuralNetConfiguration.Builder()
                        .maxNumLineSearchIterations(maxNumLineSearchIter).updater(new Sgd(0.1))
                        .layer(new DenseLayer.Builder().nIn(1).nOut(1).build()).build();
        conf.addVariable("W"); //Normally done by ParamInitializers, but obviously that isn't done here

        Model m = new SphereFunctionModel(100, dist, conf);
        if (i == 0) {
            m.computeGradientAndScore(LayerWorkspaceMgr.noWorkspaces());
            scores[0] = m.score(); //Before optimization
        } else {
            ConvexOptimizer opt = getOptimizer(oa, conf, m);
            for( int j=0; j<100; j++ ) {
                opt.optimize(LayerWorkspaceMgr.noWorkspaces());
            }
            m.computeGradientAndScore(LayerWorkspaceMgr.noWorkspaces());
            scores[i] = m.score();
            assertTrue(!Double.isNaN(scores[i]) && !Double.isInfinite(scores[i]));
        }
    }

    if (PRINT_OPT_RESULTS) {
        System.out.println("Multiple optimization iterations (" + nOptIter
                        + " opt. iter.) score vs iteration, maxNumLineSearchIter=" + maxNumLineSearchIter + ": "
                        + oa);
        System.out.println(Arrays.toString(scores));
    }

    for (int i = 1; i < scores.length; i++) {
        assertTrue(scores[i] <= scores[i - 1]);
    }
    assertTrue(scores[scores.length - 1] < 1.0); //Very easy function, expect score ~= 0 with any reasonable number of steps/numLineSearchIter
}
 
Example #4
Source File: UniformProbabilityTestCase.java    From jstarcraft-ai with Apache License 2.0
@Override
protected Distribution getOldFunction(int seed) {
    Random random = new DefaultRandom(seed);
    Distribution distribution = new org.nd4j.linalg.api.rng.distribution.impl.UniformDistribution(random, 0.4D, 4D);
    return distribution;
}
 
Example #5
Source File: BinomialProbabilityTestCase.java    From jstarcraft-ai with Apache License 2.0
@Override
protected Distribution getOldFunction(int seed) {
    Random random = new DefaultRandom(seed);
    Distribution distribution = new org.nd4j.linalg.api.rng.distribution.impl.BinomialDistribution(random, 10, 0.5D);
    return distribution;
}
 
Example #6
Source File: NormalProbabilityTestCase.java    From jstarcraft-ai with Apache License 2.0
@Override
protected Distribution getOldFunction(int seed) {
    Random random = new DefaultRandom(seed);
    Distribution distribution = new org.nd4j.linalg.api.rng.distribution.impl.NormalDistribution(random, 1D, 5D);
    return distribution;
}
 
Example #7
Source File: TestOptimizers.java    From deeplearning4j with Apache License 2.0
public void testSphereFnOptHelper(OptimizationAlgorithm oa, int numLineSearchIter, int nDimensions) {

    if (PRINT_OPT_RESULTS)
        System.out.println("---------\n Alg= " + oa + ", nIter= " + numLineSearchIter + ", nDimensions= "
                        + nDimensions);

    NeuralNetConfiguration conf = new NeuralNetConfiguration.Builder().maxNumLineSearchIterations(numLineSearchIter)
                    .updater(new Sgd(1e-2))
                    .layer(new DenseLayer.Builder().nIn(1).nOut(1).build()).build();
    conf.addVariable("W"); //Normally done by ParamInitializers, but obviously that isn't done here

    Random rng = new DefaultRandom(12345L);
    org.nd4j.linalg.api.rng.distribution.Distribution dist =
                    new org.nd4j.linalg.api.rng.distribution.impl.UniformDistribution(rng, -10, 10);
    Model m = new SphereFunctionModel(nDimensions, dist, conf);
    m.computeGradientAndScore(LayerWorkspaceMgr.noWorkspaces());
    double scoreBefore = m.score();
    assertTrue(!Double.isNaN(scoreBefore) && !Double.isInfinite(scoreBefore));
    if (PRINT_OPT_RESULTS) {
        System.out.println("Before:");
        System.out.println(scoreBefore);
        System.out.println(m.params());
    }

    ConvexOptimizer opt = getOptimizer(oa, conf, m);

    opt.setupSearchState(m.gradientAndScore());
    for( int i=0; i<100; i++ ) {
        opt.optimize(LayerWorkspaceMgr.noWorkspaces());
    }
    m.computeGradientAndScore(LayerWorkspaceMgr.noWorkspaces());
    double scoreAfter = m.score();

    assertTrue(!Double.isNaN(scoreAfter) && !Double.isInfinite(scoreAfter));
    if (PRINT_OPT_RESULTS) {
        System.out.println("After:");
        System.out.println(scoreAfter);
        System.out.println(m.params());
    }

    //Expected behaviour after optimization:
    //(a) score is better (lower) after optimization.
    //(b) Parameters are closer to minimum after optimization (TODO)
    assertTrue("Score did not improve after optimization (b= " + scoreBefore + " ,a= " + scoreAfter + ")",
                    scoreAfter < scoreBefore);
}
 
Example #8
Source File: TestOptimizers.java    From deeplearning4j with Apache License 2.0
private static INDArray initParams(int nDimensions) {
    Random rng = new DefaultRandom(12345L);
    org.nd4j.linalg.api.rng.distribution.Distribution dist =
                    new org.nd4j.linalg.api.rng.distribution.impl.UniformDistribution(rng, -5.12, 5.12);
    return dist.sample(new int[] {1, nDimensions});
}
 
Example #9
Source File: TestOptimizers.java    From deeplearning4j with Apache License 2.0
private static INDArray initParams(int nDimensions) {
    Random rng = new DefaultRandom(12345L);
    org.nd4j.linalg.api.rng.distribution.Distribution dist =
                    new org.nd4j.linalg.api.rng.distribution.impl.UniformDistribution(rng, -4.0, 4.0);
    return dist.sample(new int[] {1, nDimensions});
}