org.deeplearning4j.optimize.listeners.ScoreIterationListener Java Examples
The following examples show how to use
org.deeplearning4j.optimize.listeners.ScoreIterationListener.
Each example is drawn from a real open-source project; the source file, project, and license are noted above each snippet.
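Before the individual examples, here is a minimal, self-contained sketch of the pattern nearly all of them share: build a network, attach a ScoreIterationListener(n) via setListeners(...) so the score (loss) is logged every n iterations, then call fit(...). This is an illustrative sketch only; the class name, updater, and hyperparameters below are placeholders, not taken from any of the listed projects.

import org.deeplearning4j.datasets.iterator.impl.IrisDataSetIterator;
import org.deeplearning4j.nn.conf.MultiLayerConfiguration;
import org.deeplearning4j.nn.conf.NeuralNetConfiguration;
import org.deeplearning4j.nn.conf.layers.OutputLayer;
import org.deeplearning4j.nn.multilayer.MultiLayerNetwork;
import org.deeplearning4j.nn.weights.WeightInit;
import org.deeplearning4j.optimize.listeners.ScoreIterationListener;
import org.nd4j.linalg.activations.Activation;
import org.nd4j.linalg.dataset.api.iterator.DataSetIterator;
import org.nd4j.linalg.learning.config.Sgd;
import org.nd4j.linalg.lossfunctions.LossFunctions;

public class ScoreIterationListenerBasics {
    public static void main(String[] args) {
        // A deliberately tiny network: one softmax output layer for the 4-feature, 3-class Iris data.
        MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder()
                .updater(new Sgd(0.1))
                .weightInit(WeightInit.XAVIER)
                .list()
                .layer(0, new OutputLayer.Builder(LossFunctions.LossFunction.MCXENT)
                        .nIn(4).nOut(3).activation(Activation.SOFTMAX).build())
                .build();

        MultiLayerNetwork net = new MultiLayerNetwork(conf);
        net.init();

        // Log the score (loss) every 10 iterations during training.
        net.setListeners(new ScoreIterationListener(10));

        DataSetIterator iris = new IrisDataSetIterator(150, 150);
        for (int epoch = 0; epoch < 20; epoch++) {
            net.fit(iris);
        }
    }
}

During fit(...), the listener prints the current score at each logged iteration. An interval of 1, as used in most of the tests below, logs every iteration; that is handy for debugging but verbose for real training runs.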
Example #1
Source File: TestVertxUI.java (from deeplearning4j, Apache License 2.0)
@Test
public void testUICompGraph() {
    StatsStorage ss = new InMemoryStatsStorage();
    UIServer uiServer = UIServer.getInstance();
    uiServer.attach(ss);

    ComputationGraphConfiguration conf = new NeuralNetConfiguration.Builder().graphBuilder()
            .addInputs("in")
            .addLayer("L0", new DenseLayer.Builder().activation(Activation.TANH).nIn(4).nOut(4).build(), "in")
            .addLayer("L1", new OutputLayer.Builder().lossFunction(LossFunctions.LossFunction.MCXENT)
                    .activation(Activation.SOFTMAX).nIn(4).nOut(3).build(), "L0")
            .setOutputs("L1").build();

    ComputationGraph net = new ComputationGraph(conf);
    net.init();
    net.setListeners(new StatsListener(ss), new ScoreIterationListener(1));

    DataSetIterator iter = new IrisDataSetIterator(150, 150);
    for (int i = 0; i < 100; i++) {
        net.fit(iter);
    }
}
Example #2
Source File: MultiLayerNeuralNetConfigurationTest.java (from deeplearning4j, Apache License 2.0)
@Test
public void testTrainingListener() {
    MultiLayerNetwork model1 = new MultiLayerNetwork(getConf());
    model1.init();
    model1.addListeners(new ScoreIterationListener(1));

    MultiLayerNetwork model2 = new MultiLayerNetwork(getConf());
    model2.addListeners(new ScoreIterationListener(1));
    model2.init();

    Layer[] l1 = model1.getLayers();
    for (int i = 0; i < l1.length; i++)
        assertTrue(l1[i].getListeners() != null && l1[i].getListeners().size() == 1);

    Layer[] l2 = model2.getLayers();
    for (int i = 0; i < l2.length; i++)
        assertTrue(l2[i].getListeners() != null && l2[i].getListeners().size() == 1);
}
Example #3
Source File: TestRemoteReceiver.java (from deeplearning4j, Apache License 2.0)
@Test
@Ignore
public void testRemoteFull() throws Exception {
    //Use this in conjunction with startRemoteUI()
    MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder()
            .optimizationAlgo(OptimizationAlgorithm.STOCHASTIC_GRADIENT_DESCENT).list()
            .layer(0, new DenseLayer.Builder().activation(Activation.TANH).nIn(4).nOut(4).build())
            .layer(1, new OutputLayer.Builder().lossFunction(LossFunctions.LossFunction.MCXENT)
                    .activation(Activation.SOFTMAX).nIn(4).nOut(3).build())
            .build();
    MultiLayerNetwork net = new MultiLayerNetwork(conf);
    net.init();

    try (RemoteUIStatsStorageRouter ssr = new RemoteUIStatsStorageRouter("http://localhost:9000")) {
        net.setListeners(new StatsListener(ssr), new ScoreIterationListener(1));

        DataSetIterator iter = new IrisDataSetIterator(150, 150);
        for (int i = 0; i < 500; i++) {
            net.fit(iter);
            // Thread.sleep(100);
            Thread.sleep(100);
        }
    }
}
Example #4
Source File: CenterLossOutputLayerTest.java (from deeplearning4j, Apache License 2.0)
@Test
@Ignore //Should be run manually
public void testMNISTConfig() throws Exception {
    int batchSize = 64; // Test batch size
    DataSetIterator mnistTrain = new MnistDataSetIterator(batchSize, true, 12345);

    ComputationGraph net = getCNNMnistConfig();
    net.init();
    net.setListeners(new ScoreIterationListener(1));

    for (int i = 0; i < 50; i++) {
        net.fit(mnistTrain.next());
        Thread.sleep(1000);
    }

    Thread.sleep(100000);
}
Example #5
Source File: OutputLayerTest.java (from deeplearning4j, Apache License 2.0)
@Test
public void testSetParams() {
    NeuralNetConfiguration conf = new NeuralNetConfiguration.Builder()
            .optimizationAlgo(OptimizationAlgorithm.LINE_GRADIENT_DESCENT)
            .updater(new Sgd(1e-1))
            .layer(new org.deeplearning4j.nn.conf.layers.OutputLayer.Builder().nIn(4).nOut(3)
                    .weightInit(WeightInit.ZERO).activation(Activation.SOFTMAX)
                    .lossFunction(LossFunctions.LossFunction.MCXENT).build())
            .build();

    long numParams = conf.getLayer().initializer().numParams(conf);
    INDArray params = Nd4j.create(1, numParams);
    OutputLayer l = (OutputLayer) conf.getLayer().instantiate(conf,
            Collections.<TrainingListener>singletonList(new ScoreIterationListener(1)), 0, params, true,
            params.dataType());
    params = l.params();
    l.setParams(params);
    assertEquals(params, l.params());
}
Example #6
Source File: MultiRegression.java (from dl4j-tutorials, MIT License)
public static void main(String[] args) {
    //Generate the training data
    DataSetIterator iterator = getTrainingData(batchSize, rng);

    //Create the network
    int numInput = 2;
    int numOutputs = 1;
    MultiLayerNetwork net = new MultiLayerNetwork(new NeuralNetConfiguration.Builder()
            .seed(seed)
            .optimizationAlgo(OptimizationAlgorithm.STOCHASTIC_GRADIENT_DESCENT)
            .weightInit(WeightInit.XAVIER)
            .updater(new Sgd(learningRate))
            .list()
            .layer(0, new OutputLayer.Builder(LossFunctions.LossFunction.MSE)
                    .activation(Activation.IDENTITY)
                    .nIn(numInput).nOut(numOutputs).build())
            .pretrain(false).backprop(true).build()
    );
    net.init();
    net.setListeners(new ScoreIterationListener(1));

    for (int i = 0; i < nEpochs; i++) {
        iterator.reset();
        net.fit(iterator);
    }

    final INDArray input = Nd4j.create(new double[] {0.111111, 0.3333333333333}, new int[] {1, 2});
    INDArray out = net.output(input, false);
    System.out.println(out);
}
Example #7
Source File: TestEarlyStoppingCompGraph.java (from deeplearning4j, Apache License 2.0)
@Test
public void testBadTuning() {
    //Test poor tuning (high LR): should terminate on MaxScoreIterationTerminationCondition
    Nd4j.getRandom().setSeed(12345);
    ComputationGraphConfiguration conf = new NeuralNetConfiguration.Builder().seed(12345)
            .optimizationAlgo(OptimizationAlgorithm.STOCHASTIC_GRADIENT_DESCENT)
            .updater(new Sgd(5.0)) //Intentionally huge LR
            .weightInit(WeightInit.XAVIER).graphBuilder().addInputs("in")
            .addLayer("0", new OutputLayer.Builder().nIn(4).nOut(3).activation(Activation.SOFTMAX)
                    .lossFunction(LossFunctions.LossFunction.MCXENT).build(), "in")
            .setOutputs("0").build();
    ComputationGraph net = new ComputationGraph(conf);
    net.setListeners(new ScoreIterationListener(1));

    DataSetIterator irisIter = new IrisDataSetIterator(150, 150);
    EarlyStoppingModelSaver<ComputationGraph> saver = new InMemoryModelSaver<>();
    EarlyStoppingConfiguration<ComputationGraph> esConf =
            new EarlyStoppingConfiguration.Builder<ComputationGraph>()
                    .epochTerminationConditions(new MaxEpochsTerminationCondition(5000))
                    .iterationTerminationConditions(
                            new MaxTimeIterationTerminationCondition(1, TimeUnit.MINUTES),
                            new MaxScoreIterationTerminationCondition(10)) //Initial score is ~2.5
                    .scoreCalculator(new DataSetLossCalculatorCG(irisIter, true)).modelSaver(saver).build();

    IEarlyStoppingTrainer trainer = new EarlyStoppingGraphTrainer(esConf, net, irisIter);
    EarlyStoppingResult result = trainer.fit();

    assertTrue(result.getTotalEpochs() < 5);
    assertEquals(EarlyStoppingResult.TerminationReason.IterationTerminationCondition,
            result.getTerminationReason());
    String expDetails = new MaxScoreIterationTerminationCondition(10).toString();
    assertEquals(expDetails, result.getTerminationDetails());
    assertEquals(0, result.getBestModelEpoch());
    assertNotNull(result.getBestModel());
}
Example #8
Source File: TestEarlyStopping.java (from deeplearning4j, Apache License 2.0)
@Test
public void testListeners() {
    MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder()
            .optimizationAlgo(OptimizationAlgorithm.STOCHASTIC_GRADIENT_DESCENT)
            .updater(new Sgd(0.001)).weightInit(WeightInit.XAVIER).list()
            .layer(0, new OutputLayer.Builder().nIn(4).nOut(3)
                    .activation(Activation.SOFTMAX)
                    .lossFunction(LossFunctions.LossFunction.MCXENT).build())
            .build();
    MultiLayerNetwork net = new MultiLayerNetwork(conf);
    net.setListeners(new ScoreIterationListener(1));

    DataSetIterator irisIter = new IrisDataSetIterator(150, 150);
    EarlyStoppingModelSaver<MultiLayerNetwork> saver = new InMemoryModelSaver<>();
    EarlyStoppingConfiguration<MultiLayerNetwork> esConf =
            new EarlyStoppingConfiguration.Builder<MultiLayerNetwork>()
                    .epochTerminationConditions(new MaxEpochsTerminationCondition(5))
                    .iterationTerminationConditions(
                            new MaxTimeIterationTerminationCondition(1, TimeUnit.MINUTES))
                    .scoreCalculator(new DataSetLossCalculator(irisIter, true)).modelSaver(saver)
                    .build();

    LoggingEarlyStoppingListener listener = new LoggingEarlyStoppingListener();
    IEarlyStoppingTrainer trainer = new EarlyStoppingTrainer(esConf, net, irisIter, listener);
    trainer.fit();

    assertEquals(1, listener.onStartCallCount);
    assertEquals(5, listener.onEpochCallCount);
    assertEquals(1, listener.onCompletionCallCount);
}
Example #9
Source File: TestEarlyStopping.java (from deeplearning4j, Apache License 2.0)
@Test
public void testEarlyStoppingGetBestModel() {
    MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder()
            .optimizationAlgo(OptimizationAlgorithm.STOCHASTIC_GRADIENT_DESCENT)
            .updater(new Sgd(0.001)).weightInit(WeightInit.XAVIER).list()
            .layer(0, new OutputLayer.Builder().nIn(4).nOut(3)
                    .activation(Activation.SOFTMAX)
                    .lossFunction(LossFunctions.LossFunction.MCXENT).build())
            .build();
    MultiLayerNetwork net = new MultiLayerNetwork(conf);
    net.setListeners(new ScoreIterationListener(1));

    DataSetIterator irisIter = new IrisDataSetIterator(150, 150);
    MultipleEpochsIterator mIter = new MultipleEpochsIterator(10, irisIter);

    EarlyStoppingModelSaver<MultiLayerNetwork> saver = new InMemoryModelSaver<>();
    EarlyStoppingConfiguration<MultiLayerNetwork> esConf =
            new EarlyStoppingConfiguration.Builder<MultiLayerNetwork>()
                    .epochTerminationConditions(new MaxEpochsTerminationCondition(5))
                    .iterationTerminationConditions(
                            new MaxTimeIterationTerminationCondition(1, TimeUnit.MINUTES))
                    .scoreCalculator(new DataSetLossCalculator(irisIter, true)).modelSaver(saver)
                    .build();

    IEarlyStoppingTrainer<MultiLayerNetwork> trainer = new EarlyStoppingTrainer(esConf, net, mIter);
    EarlyStoppingResult<MultiLayerNetwork> result = trainer.fit();
    System.out.println(result);

    MultiLayerNetwork mln = result.getBestModel();

    assertEquals(net.getnLayers(), mln.getnLayers());
    assertEquals(net.conf().getOptimizationAlgo(), mln.conf().getOptimizationAlgo());
    BaseLayer bl = (BaseLayer) net.conf().getLayer();
    assertEquals(bl.getActivationFn().toString(),
            ((BaseLayer) mln.conf().getLayer()).getActivationFn().toString());
    assertEquals(bl.getIUpdater(), ((BaseLayer) mln.conf().getLayer()).getIUpdater());
}
Example #10
Source File: TestEarlyStopping.java (from deeplearning4j, Apache License 2.0)
@Test
public void testNoImprovementNEpochsTermination() {
    //Idea: terminate training if score (test set loss) does not improve for 5 consecutive epochs
    //Simulate this by setting LR = 0.0
    Nd4j.getRandom().setSeed(12345);
    MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder().seed(12345)
            .optimizationAlgo(OptimizationAlgorithm.STOCHASTIC_GRADIENT_DESCENT)
            .updater(new Sgd(0.0)).weightInit(WeightInit.XAVIER).list()
            .layer(0, new OutputLayer.Builder().nIn(4).nOut(3)
                    .activation(Activation.SOFTMAX)
                    .lossFunction(LossFunctions.LossFunction.MCXENT).build())
            .build();
    MultiLayerNetwork net = new MultiLayerNetwork(conf);
    net.setListeners(new ScoreIterationListener(1));

    DataSetIterator irisIter = new IrisDataSetIterator(150, 150);
    EarlyStoppingModelSaver<MultiLayerNetwork> saver = new InMemoryModelSaver<>();
    EarlyStoppingConfiguration<MultiLayerNetwork> esConf =
            new EarlyStoppingConfiguration.Builder<MultiLayerNetwork>()
                    .epochTerminationConditions(new MaxEpochsTerminationCondition(100),
                            new ScoreImprovementEpochTerminationCondition(5))
                    .iterationTerminationConditions(
                            new MaxTimeIterationTerminationCondition(1, TimeUnit.MINUTES),
                            new MaxScoreIterationTerminationCondition(50)) //Initial score is ~8
                    .scoreCalculator(new DataSetLossCalculator(irisIter, true)).modelSaver(saver)
                    .build();

    IEarlyStoppingTrainer trainer = new EarlyStoppingTrainer(esConf, net, irisIter);
    EarlyStoppingResult result = trainer.fit();

    //Expect no score change due to 0 LR -> terminate after 6 total epochs
    assertEquals(6, result.getTotalEpochs());
    assertEquals(0, result.getBestModelEpoch());
    assertEquals(EarlyStoppingResult.TerminationReason.EpochTerminationCondition,
            result.getTerminationReason());
    String expDetails = new ScoreImprovementEpochTerminationCondition(5).toString();
    assertEquals(expDetails, result.getTerminationDetails());
}
Example #11
Source File: TestEarlyStopping.java (from deeplearning4j, Apache License 2.0)
@Test
public void testBadTuning() {
    //Test poor tuning (high LR): should terminate on MaxScoreIterationTerminationCondition
    Nd4j.getRandom().setSeed(12345);
    MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder().seed(12345)
            .optimizationAlgo(OptimizationAlgorithm.STOCHASTIC_GRADIENT_DESCENT)
            .updater(new Sgd(5.0)) //Intentionally huge LR
            .weightInit(WeightInit.XAVIER).list()
            .layer(0, new OutputLayer.Builder().nIn(4).nOut(3).activation(Activation.SOFTMAX)
                    .lossFunction(LossFunctions.LossFunction.MCXENT).build())
            .build();
    MultiLayerNetwork net = new MultiLayerNetwork(conf);
    net.setListeners(new ScoreIterationListener(1));

    DataSetIterator irisIter = new IrisDataSetIterator(150, 150);
    EarlyStoppingModelSaver<MultiLayerNetwork> saver = new InMemoryModelSaver<>();
    EarlyStoppingConfiguration<MultiLayerNetwork> esConf =
            new EarlyStoppingConfiguration.Builder<MultiLayerNetwork>()
                    .epochTerminationConditions(new MaxEpochsTerminationCondition(5000))
                    .iterationTerminationConditions(
                            new MaxTimeIterationTerminationCondition(1, TimeUnit.MINUTES),
                            new MaxScoreIterationTerminationCondition(10)) //Initial score is ~2.5
                    .scoreCalculator(new DataSetLossCalculator(irisIter, true)).modelSaver(saver)
                    .build();

    IEarlyStoppingTrainer trainer = new EarlyStoppingTrainer(esConf, net, irisIter);
    EarlyStoppingResult result = trainer.fit();

    assertTrue(result.getTotalEpochs() < 5);
    assertEquals(EarlyStoppingResult.TerminationReason.IterationTerminationCondition,
            result.getTerminationReason());
    String expDetails = new MaxScoreIterationTerminationCondition(10).toString();
    assertEquals(expDetails, result.getTerminationDetails());
    assertEquals(0, result.getBestModelEpoch());
    assertNotNull(result.getBestModel());
}
Example #12
Source File: TestEarlyStopping.java (from deeplearning4j, Apache License 2.0)
@Test
public void testEarlyStoppingEveryNEpoch() {
    MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder()
            .optimizationAlgo(OptimizationAlgorithm.STOCHASTIC_GRADIENT_DESCENT)
            .updater(new Sgd(0.01)).weightInit(WeightInit.XAVIER).list()
            .layer(0, new OutputLayer.Builder().nIn(4).nOut(3)
                    .activation(Activation.SOFTMAX)
                    .lossFunction(LossFunctions.LossFunction.MCXENT).build())
            .build();
    MultiLayerNetwork net = new MultiLayerNetwork(conf);
    net.setListeners(new ScoreIterationListener(1));

    DataSetIterator irisIter = new IrisDataSetIterator(150, 150);
    EarlyStoppingModelSaver<MultiLayerNetwork> saver = new InMemoryModelSaver<>();
    EarlyStoppingConfiguration<MultiLayerNetwork> esConf =
            new EarlyStoppingConfiguration.Builder<MultiLayerNetwork>()
                    .epochTerminationConditions(new MaxEpochsTerminationCondition(5))
                    .scoreCalculator(new DataSetLossCalculator(irisIter, true))
                    .evaluateEveryNEpochs(2).modelSaver(saver).build();

    IEarlyStoppingTrainer<MultiLayerNetwork> trainer = new EarlyStoppingTrainer(esConf, net, irisIter);
    EarlyStoppingResult<MultiLayerNetwork> result = trainer.fit();
    System.out.println(result);

    assertEquals(5, result.getTotalEpochs());
    assertEquals(EarlyStoppingResult.TerminationReason.EpochTerminationCondition,
            result.getTerminationReason());
}
Example #13
Source File: TestEarlyStoppingCompGraph.java (from deeplearning4j, Apache License 2.0)
@Test
public void testListeners() {
    ComputationGraphConfiguration conf = new NeuralNetConfiguration.Builder()
            .optimizationAlgo(OptimizationAlgorithm.STOCHASTIC_GRADIENT_DESCENT)
            .updater(new Sgd(0.001)).weightInit(WeightInit.XAVIER).graphBuilder().addInputs("in")
            .addLayer("0", new OutputLayer.Builder().nIn(4).nOut(3)
                    .activation(Activation.SOFTMAX)
                    .lossFunction(LossFunctions.LossFunction.MCXENT).build(), "in")
            .setOutputs("0").build();
    ComputationGraph net = new ComputationGraph(conf);
    net.setListeners(new ScoreIterationListener(1));

    DataSetIterator irisIter = new IrisDataSetIterator(150, 150);
    EarlyStoppingModelSaver<ComputationGraph> saver = new InMemoryModelSaver<>();
    EarlyStoppingConfiguration<ComputationGraph> esConf =
            new EarlyStoppingConfiguration.Builder<ComputationGraph>()
                    .epochTerminationConditions(new MaxEpochsTerminationCondition(5))
                    .iterationTerminationConditions(
                            new MaxTimeIterationTerminationCondition(1, TimeUnit.MINUTES))
                    .scoreCalculator(new DataSetLossCalculatorCG(irisIter, true)).modelSaver(saver).build();

    LoggingEarlyStoppingListener listener = new LoggingEarlyStoppingListener();
    IEarlyStoppingTrainer trainer = new EarlyStoppingGraphTrainer(esConf, net, irisIter, listener);
    trainer.fit();

    assertEquals(1, listener.onStartCallCount);
    assertEquals(5, listener.onEpochCallCount);
    assertEquals(1, listener.onCompletionCallCount);
}
Example #14
Source File: Gan4Exemple.java (from dl4j-tutorials, MIT License)
private static void discInit() throws IOException {
    discUpdater = new RmsProp(drate);
    MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder()
            .seed(1234)
            .updater(new RmsProp(1e-3))
            .weightInit(WeightInit.XAVIER)
            .list()
            .layer(new DenseLayer.Builder()
                    .nIn(width * height)
                    .nOut(1024)
                    .activation(Activation.RELU)
                    .build())
            .layer(new DenseLayer.Builder()
                    .nIn(1024)
                    .nOut(512)
                    .activation(Activation.RELU)
                    .build())
            .layer(new OutputLayer.Builder(LossFunctions.LossFunction.NEGATIVELOGLIKELIHOOD)
                    .nIn(512)
                    .nOut(2)
                    .activation(Activation.SOFTMAX)
                    .build())
            .setInputType(InputType.feedForward(width * height))
            .backprop(true).pretrain(false).build();

    discNet = new MultiLayerNetwork(conf);
    discNet.init();
    discNet.setListeners(new ScoreIterationListener(10));
}
Example #15
Source File: Main.java (from twse-captcha-solver-dl4j, MIT License)
public static void main(String[] args) throws Exception {
    long startTime = System.currentTimeMillis();
    logger.info("start up time: " + startTime);

    File modelDir = new File(modelDirPath);

    // create dir
    boolean hasDir = modelDir.exists() || modelDir.mkdirs();
    logger.info(modelPath);

    // create model
    ComputationGraph model = createModel();

    // monitor the model score
    UIServer uiServer = UIServer.getInstance();
    StatsStorage statsStorage = new InMemoryStatsStorage();
    uiServer.attach(statsStorage);
    model.setListeners(new ScoreIterationListener(36), new StatsListener(statsStorage));

    // construct the iterator
    MultiDataSetIterator trainMulIterator = new CaptchaSetIterator(batchSize, "train");
    MultiDataSetIterator testMulIterator = new CaptchaSetIterator(batchSize, "test");
    MultiDataSetIterator validateMulIterator = new CaptchaSetIterator(batchSize, "validate");

    // fit
    for (int i = 0; i < epochs; i++) {
        System.out.println("Epoch=====================" + i);
        model.fit(trainMulIterator);
    }
    ModelSerializer.writeModel(model, modelPath, true);

    long endTime = System.currentTimeMillis();
    System.out.println("=============run time=====================" + (endTime - startTime));
    System.out.println("=====eval model=====test==================");
    modelPredict(model, testMulIterator);
    System.out.println("=====eval model=====validate==================");
    modelPredict(model, validateMulIterator);
}
Example #16
Source File: TestEarlyStoppingSpark.java (from deeplearning4j, Apache License 2.0)
@Test
public void testListeners() {
    MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder()
            .optimizationAlgo(OptimizationAlgorithm.STOCHASTIC_GRADIENT_DESCENT)
            .updater(new Sgd()).weightInit(WeightInit.XAVIER).list()
            .layer(0, new OutputLayer.Builder().nIn(4).nOut(3)
                    .lossFunction(LossFunctions.LossFunction.MCXENT).build())
            .build();
    MultiLayerNetwork net = new MultiLayerNetwork(conf);
    net.setListeners(new ScoreIterationListener(5));

    JavaRDD<DataSet> irisData = getIris();

    EarlyStoppingModelSaver<MultiLayerNetwork> saver = new InMemoryModelSaver<>();
    EarlyStoppingConfiguration<MultiLayerNetwork> esConf =
            new EarlyStoppingConfiguration.Builder<MultiLayerNetwork>()
                    .epochTerminationConditions(new MaxEpochsTerminationCondition(5))
                    .iterationTerminationConditions(
                            new MaxTimeIterationTerminationCondition(2, TimeUnit.MINUTES))
                    .scoreCalculator(new SparkDataSetLossCalculator(irisData, true, sc.sc()))
                    .modelSaver(saver).build();

    LoggingEarlyStoppingListener listener = new LoggingEarlyStoppingListener();
    IEarlyStoppingTrainer<MultiLayerNetwork> trainer = new SparkEarlyStoppingTrainer(
            getContext().sc(),
            new ParameterAveragingTrainingMaster(true, Runtime.getRuntime().availableProcessors(), 1, 10, 1, 0),
            esConf, net, irisData);
    trainer.setListener(listener);
    trainer.fit();

    assertEquals(1, listener.onStartCallCount);
    assertEquals(5, listener.onEpochCallCount);
    assertEquals(1, listener.onCompletionCallCount);
}
Example #17
Source File: TestEarlyStoppingSpark.java (from deeplearning4j, Apache License 2.0)
@Test
public void testBadTuning() {
    //Test poor tuning (high LR): should terminate on MaxScoreIterationTerminationCondition
    Nd4j.getRandom().setSeed(12345);
    MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder().seed(12345)
            .optimizationAlgo(OptimizationAlgorithm.STOCHASTIC_GRADIENT_DESCENT)
            .updater(new Sgd(10.0)) //Intentionally huge LR
            .weightInit(WeightInit.XAVIER).list()
            .layer(0, new OutputLayer.Builder().nIn(4).nOut(3).activation(Activation.IDENTITY)
                    .lossFunction(LossFunctions.LossFunction.MSE).build())
            .build();
    MultiLayerNetwork net = new MultiLayerNetwork(conf);
    net.setListeners(new ScoreIterationListener(5));

    JavaRDD<DataSet> irisData = getIris();
    EarlyStoppingModelSaver<MultiLayerNetwork> saver = new InMemoryModelSaver<>();
    EarlyStoppingConfiguration<MultiLayerNetwork> esConf =
            new EarlyStoppingConfiguration.Builder<MultiLayerNetwork>()
                    .epochTerminationConditions(new MaxEpochsTerminationCondition(5000))
                    .iterationTerminationConditions(
                            new MaxTimeIterationTerminationCondition(2, TimeUnit.MINUTES),
                            new MaxScoreIterationTerminationCondition(7.5)) //Initial score is ~2.5
                    .scoreCalculator(new SparkDataSetLossCalculator(irisData, true, sc.sc()))
                    .modelSaver(saver).build();

    IEarlyStoppingTrainer<MultiLayerNetwork> trainer = new SparkEarlyStoppingTrainer(getContext().sc(),
            new ParameterAveragingTrainingMaster(true, 4, 1, 150 / 4, 1, 0), esConf, net, irisData);
    EarlyStoppingResult result = trainer.fit();

    assertTrue(result.getTotalEpochs() < 5);
    assertEquals(EarlyStoppingResult.TerminationReason.IterationTerminationCondition,
            result.getTerminationReason());
    String expDetails = new MaxScoreIterationTerminationCondition(7.5).toString();
    assertEquals(expDetails, result.getTerminationDetails());
}
Example #18
Source File: TestEarlyStoppingSpark.java (from deeplearning4j, Apache License 2.0)
@Test
public void testNoImprovementNEpochsTermination() {
    //Idea: terminate training if score (test set loss) does not improve for 5 consecutive epochs
    //Simulate this by setting LR = 0.0
    Nd4j.getRandom().setSeed(12345);
    MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder().seed(12345)
            .optimizationAlgo(OptimizationAlgorithm.STOCHASTIC_GRADIENT_DESCENT)
            .updater(new Sgd(0.0)).weightInit(WeightInit.XAVIER).list()
            .layer(0, new OutputLayer.Builder().nIn(4).nOut(3)
                    .lossFunction(LossFunctions.LossFunction.MCXENT).build())
            .build();
    MultiLayerNetwork net = new MultiLayerNetwork(conf);
    net.setListeners(new ScoreIterationListener(5));

    JavaRDD<DataSet> irisData = getIris();
    EarlyStoppingModelSaver<MultiLayerNetwork> saver = new InMemoryModelSaver<>();
    EarlyStoppingConfiguration<MultiLayerNetwork> esConf =
            new EarlyStoppingConfiguration.Builder<MultiLayerNetwork>()
                    .epochTerminationConditions(new MaxEpochsTerminationCondition(100),
                            new ScoreImprovementEpochTerminationCondition(5))
                    .iterationTerminationConditions(
                            new MaxScoreIterationTerminationCondition(7.5)) //Initial score is ~2.5
                    .scoreCalculator(new SparkDataSetLossCalculator(irisData, true, sc.sc()))
                    .modelSaver(saver).build();

    IEarlyStoppingTrainer<MultiLayerNetwork> trainer = new SparkEarlyStoppingTrainer(getContext().sc(),
            new ParameterAveragingTrainingMaster(true, 4, 1, 150 / 10, 1, 0), esConf, net, irisData);
    EarlyStoppingResult result = trainer.fit();

    //Expect no score change due to 0 LR -> terminate after 6 total epochs
    //Normally expect 6 epochs exactly; get a little more than that here due to rounding + order of operations
    assertTrue(result.getTotalEpochs() < 12);
    assertEquals(EarlyStoppingResult.TerminationReason.EpochTerminationCondition,
            result.getTerminationReason());
    String expDetails = new ScoreImprovementEpochTerminationCondition(5).toString();
    assertEquals(expDetails, result.getTerminationDetails());
}
Example #19
Source File: TestEarlyStoppingCompGraph.java (from deeplearning4j, Apache License 2.0)
@Test
public void testTimeTermination() {
    //test termination after max time
    Nd4j.getRandom().setSeed(12345);
    ComputationGraphConfiguration conf = new NeuralNetConfiguration.Builder().seed(12345)
            .optimizationAlgo(OptimizationAlgorithm.STOCHASTIC_GRADIENT_DESCENT)
            .updater(new Sgd(1e-6)).weightInit(WeightInit.XAVIER).graphBuilder()
            .addInputs("in")
            .addLayer("0", new OutputLayer.Builder().nIn(4).nOut(3)
                    .activation(Activation.SOFTMAX)
                    .lossFunction(LossFunctions.LossFunction.MCXENT).build(), "in")
            .setOutputs("0").build();
    ComputationGraph net = new ComputationGraph(conf);
    net.setListeners(new ScoreIterationListener(1));

    DataSetIterator irisIter = new IrisDataSetIterator(150, 150);

    EarlyStoppingModelSaver<ComputationGraph> saver = new InMemoryModelSaver<>();
    EarlyStoppingConfiguration<ComputationGraph> esConf =
            new EarlyStoppingConfiguration.Builder<ComputationGraph>()
                    .epochTerminationConditions(new MaxEpochsTerminationCondition(10000))
                    .iterationTerminationConditions(
                            new MaxTimeIterationTerminationCondition(5, TimeUnit.SECONDS),
                            new MaxScoreIterationTerminationCondition(50)) //Initial score is ~8
                    .scoreCalculator(new DataSetLossCalculator(irisIter, true))
                    .modelSaver(saver).build();

    IEarlyStoppingTrainer trainer = new EarlyStoppingGraphTrainer(esConf, net, irisIter);
    long startTime = System.currentTimeMillis();
    EarlyStoppingResult result = trainer.fit();
    long endTime = System.currentTimeMillis();
    int durationSeconds = (int) (endTime - startTime) / 1000;

    assertTrue(durationSeconds >= 3);
    assertTrue(durationSeconds <= 20);

    assertEquals(EarlyStoppingResult.TerminationReason.IterationTerminationCondition,
            result.getTerminationReason());
    String expDetails = new MaxTimeIterationTerminationCondition(5, TimeUnit.SECONDS).toString();
    assertEquals(expDetails, result.getTerminationDetails());
}
Example #20
Source File: TestEarlyStoppingCompGraph.java (from deeplearning4j, Apache License 2.0)
@Test
public void testNoImprovementNEpochsTermination() {
    //Idea: terminate training if score (test set loss) does not improve for 5 consecutive epochs
    //Simulate this by setting LR = 0.0
    Nd4j.getRandom().setSeed(12345);
    ComputationGraphConfiguration conf = new NeuralNetConfiguration.Builder().seed(12345)
            .optimizationAlgo(OptimizationAlgorithm.STOCHASTIC_GRADIENT_DESCENT)
            .updater(new Sgd(0.0)).weightInit(WeightInit.XAVIER).graphBuilder()
            .addInputs("in")
            .addLayer("0", new OutputLayer.Builder().nIn(4).nOut(3)
                    .activation(Activation.SOFTMAX)
                    .lossFunction(LossFunctions.LossFunction.MCXENT).build(), "in")
            .setOutputs("0").build();
    ComputationGraph net = new ComputationGraph(conf);
    net.setListeners(new ScoreIterationListener(1));

    DataSetIterator irisIter = new IrisDataSetIterator(150, 150);
    EarlyStoppingModelSaver<ComputationGraph> saver = new InMemoryModelSaver<>();
    EarlyStoppingConfiguration<ComputationGraph> esConf =
            new EarlyStoppingConfiguration.Builder<ComputationGraph>()
                    .epochTerminationConditions(new MaxEpochsTerminationCondition(100),
                            new ScoreImprovementEpochTerminationCondition(5))
                    .iterationTerminationConditions(
                            new MaxTimeIterationTerminationCondition(1, TimeUnit.MINUTES),
                            new MaxScoreIterationTerminationCondition(50)) //Initial score is ~8
                    .scoreCalculator(new DataSetLossCalculatorCG(irisIter, true)).modelSaver(saver).build();

    IEarlyStoppingTrainer trainer = new EarlyStoppingGraphTrainer(esConf, net, irisIter);
    EarlyStoppingResult result = trainer.fit();

    //Expect no score change due to 0 LR -> terminate after 6 total epochs
    assertEquals(6, result.getTotalEpochs());
    assertEquals(0, result.getBestModelEpoch());
    assertEquals(EarlyStoppingResult.TerminationReason.EpochTerminationCondition,
            result.getTerminationReason());
    String expDetails = new ScoreImprovementEpochTerminationCondition(5).toString();
    assertEquals(expDetails, result.getTerminationDetails());
}
Example #21
Source File: TestParallelEarlyStopping.java (from deeplearning4j, Apache License 2.0)
@Test
public void testEarlyStoppingEveryNEpoch() {
    MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder()
            .optimizationAlgo(OptimizationAlgorithm.STOCHASTIC_GRADIENT_DESCENT)
            .updater(new Sgd()).weightInit(WeightInit.XAVIER).list()
            .layer(0, new OutputLayer.Builder().nIn(4).nOut(3)
                    .lossFunction(LossFunctions.LossFunction.MCXENT).activation(Activation.SOFTMAX).build())
            .build();
    MultiLayerNetwork net = new MultiLayerNetwork(conf);
    net.setListeners(new ScoreIterationListener(1));

    DataSetIterator irisIter = new IrisDataSetIterator(50, 600);
    EarlyStoppingModelSaver<MultiLayerNetwork> saver = new InMemoryModelSaver<>();
    EarlyStoppingConfiguration<MultiLayerNetwork> esConf =
            new EarlyStoppingConfiguration.Builder<MultiLayerNetwork>()
                    .epochTerminationConditions(new MaxEpochsTerminationCondition(5))
                    .scoreCalculator(new DataSetLossCalculator(irisIter, true))
                    .evaluateEveryNEpochs(2).modelSaver(saver).build();

    IEarlyStoppingTrainer<MultiLayerNetwork> trainer =
            new EarlyStoppingParallelTrainer<>(esConf, net, irisIter, null, 2, 6, 1);

    EarlyStoppingResult<MultiLayerNetwork> result = trainer.fit();
    System.out.println(result);

    assertEquals(5, result.getTotalEpochs());
    assertEquals(EarlyStoppingResult.TerminationReason.EpochTerminationCondition,
            result.getTerminationReason());
}
Example #22
Source File: ManualTests.java (from deeplearning4j, Apache License 2.0)
@Test
public void testLaunch() throws Exception {
    // UiServer server = UiServer.getInstance();
    //
    // System.out.println("http://localhost:" + server.getPort() + "/");

    Thread.sleep(10000000000L);

    new ScoreIterationListener(100);
    fail("not implemented");
}
Example #23
Source File: WorkspaceTests.java (from deeplearning4j, Apache License 2.0)
@Test
public void testClearing() {
    for (WorkspaceMode wsm : WorkspaceMode.values()) {
        ComputationGraphConfiguration config = new NeuralNetConfiguration.Builder()
                .updater(new Adam())
                .inferenceWorkspaceMode(wsm)
                .trainingWorkspaceMode(wsm)
                .graphBuilder()
                .addInputs("in")
                .setInputTypes(InputType.recurrent(200))
                .addLayer("embeddings", new EmbeddingLayer.Builder().nIn(200).nOut(50).build(), "in")
                .addLayer("a", new GravesLSTM.Builder().nOut(300).activation(Activation.HARDTANH).build(), "embeddings")
                .addVertex("b", new LastTimeStepVertex("in"), "a")
                .addLayer("c", new DenseLayer.Builder().nOut(300).activation(Activation.HARDTANH).build(), "b")
                .addLayer("output", new LossLayer.Builder().lossFunction(LossFunctions.LossFunction.COSINE_PROXIMITY).build(), "c")
                .setOutputs("output")
                .build();

        final ComputationGraph computationGraph = new ComputationGraph(config);
        computationGraph.init();
        computationGraph.setListeners(new ScoreIterationListener(3));

        WSTestDataSetIterator iterator = new WSTestDataSetIterator();
        computationGraph.fit(iterator);
    }
}
Example #24
Source File: OCNNOutputLayerTest.java (from deeplearning4j, Apache License 2.0)
private MultiLayerNetwork getSingleLayer() {
    int numHidden = 2;

    MultiLayerConfiguration configuration = new NeuralNetConfiguration.Builder()
            .seed(12345)
            .weightInit(WeightInit.XAVIER)
            .miniBatch(true)
            .updater(new Adam(0.1))
            // .updater(Nesterovs.builder()
            //         .momentum(0.1)
            //         .learningRateSchedule(new StepSchedule(
            //                 ScheduleType.EPOCH,
            //                 1e-2,
            //                 0.1,
            //                 20)).build())
            .list(new DenseLayer.Builder().activation(new ActivationReLU())
                            .nIn(4).nOut(2).build(),
                    new org.deeplearning4j.nn.conf.ocnn.OCNNOutputLayer.Builder()
                            .nIn(2).activation(new ActivationSigmoid()).initialRValue(0.1)
                            .nu(0.1)
                            .hiddenLayerSize(numHidden).build())
            .build();
    MultiLayerNetwork network = new MultiLayerNetwork(configuration);
    network.init();
    network.setListeners(new ScoreIterationListener(1));
    return network;
}
Example #25
Source File: BackPropMLPTest.java (from deeplearning4j, Apache License 2.0)
@Test
public void testMLPTrivial() {
    //Simplest possible case: 1 hidden layer, 1 hidden neuron, batch size of 1.
    MultiLayerNetwork network = new MultiLayerNetwork(getIrisMLPSimpleConfig(new int[] {1}, Activation.SIGMOID));
    network.setListeners(new ScoreIterationListener(1));
    network.init();

    DataSetIterator iter = new IrisDataSetIterator(1, 10);

    while (iter.hasNext())
        network.fit(iter.next());
}
Example #26
Source File: MultiLayerTest.java (from deeplearning4j, Apache License 2.0)
@Test
public void testBatchNorm() {
    Nd4j.getRandom().setSeed(123);
    MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder()
            .optimizationAlgo(OptimizationAlgorithm.LINE_GRADIENT_DESCENT).seed(123).list()
            .layer(0, new DenseLayer.Builder().nIn(4).nOut(3).weightInit(WeightInit.XAVIER)
                    .activation(Activation.TANH).build())
            .layer(1, new DenseLayer.Builder().nIn(3).nOut(2).weightInit(WeightInit.XAVIER)
                    .activation(Activation.TANH).build())
            .layer(2, new BatchNormalization.Builder().nOut(2).build())
            .layer(3, new org.deeplearning4j.nn.conf.layers.OutputLayer.Builder(
                    LossFunctions.LossFunction.MCXENT).weightInit(WeightInit.XAVIER)
                    .activation(Activation.SOFTMAX).nIn(2).nOut(3).build())
            .build();

    MultiLayerNetwork network = new MultiLayerNetwork(conf);
    network.init();
    network.setListeners(new ScoreIterationListener(1));

    DataSetIterator iter = new IrisDataSetIterator(150, 150);
    DataSet next = iter.next();
    next.normalizeZeroMeanZeroUnitVariance();
    SplitTestAndTrain trainTest = next.splitTestAndTrain(110);
    network.setLabels(trainTest.getTrain().getLabels());
    network.init();
    for (int i = 0; i < 5; i++) {
        network.fit(trainTest.getTrain());
    }
}
Example #27
Source File: MultiLayerTest.java (from deeplearning4j, Apache License 2.0)
@Test
public void testBackProp() {
    Nd4j.getRandom().setSeed(123);
    MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder()
            .optimizationAlgo(OptimizationAlgorithm.LINE_GRADIENT_DESCENT).seed(123).list()
            .layer(0, new DenseLayer.Builder().nIn(4).nOut(3).weightInit(WeightInit.XAVIER)
                    .activation(Activation.TANH).build())
            .layer(1, new DenseLayer.Builder().nIn(3).nOut(2).weightInit(WeightInit.XAVIER)
                    .activation(Activation.TANH).build())
            .layer(2, new org.deeplearning4j.nn.conf.layers.OutputLayer.Builder(
                    LossFunctions.LossFunction.MCXENT).weightInit(WeightInit.XAVIER)
                    .activation(Activation.SOFTMAX).nIn(2).nOut(3).build())
            .build();

    MultiLayerNetwork network = new MultiLayerNetwork(conf);
    network.init();
    network.setListeners(new ScoreIterationListener(1));

    DataSetIterator iter = new IrisDataSetIterator(150, 150);
    DataSet next = iter.next();
    next.normalizeZeroMeanZeroUnitVariance();
    SplitTestAndTrain trainTest = next.splitTestAndTrain(110);
    network.setInput(trainTest.getTrain().getFeatures());
    network.setLabels(trainTest.getTrain().getLabels());
    network.init();
    for (int i = 0; i < 5; i++) {
        network.fit(trainTest.getTrain());
    }

    DataSet test = trainTest.getTest();
    Evaluation eval = new Evaluation();
    INDArray output = network.output(test.getFeatures());
    eval.eval(test.getLabels(), output);
    log.info("Score " + eval.stats());
}
Example #28
Source File: TestVertxUI.java (from deeplearning4j, Apache License 2.0)
@Test
public void testUIMultipleSessions() throws Exception {
    for (int session = 0; session < 3; session++) {
        StatsStorage ss = new InMemoryStatsStorage();
        UIServer uiServer = UIServer.getInstance();
        uiServer.attach(ss);

        MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder()
                .optimizationAlgo(OptimizationAlgorithm.STOCHASTIC_GRADIENT_DESCENT).list()
                .layer(0, new DenseLayer.Builder().activation(Activation.TANH).nIn(4).nOut(4).build())
                .layer(1, new OutputLayer.Builder().lossFunction(LossFunctions.LossFunction.MCXENT)
                        .activation(Activation.SOFTMAX).nIn(4).nOut(3).build())
                .build();

        MultiLayerNetwork net = new MultiLayerNetwork(conf);
        net.init();
        net.setListeners(new StatsListener(ss, 1), new ScoreIterationListener(1));

        DataSetIterator iter = new IrisDataSetIterator(150, 150);
        for (int i = 0; i < 20; i++) {
            net.fit(iter);
            Thread.sleep(100);
        }
    }
}
Example #29
Source File: DataSetIteratorTest.java (from deeplearning4j, Apache License 2.0)
@Test
public void testLfwModel() throws Exception {
    final int numRows = 28;
    final int numColumns = 28;
    int numChannels = 3;
    int outputNum = LFWLoader.NUM_LABELS;
    int numSamples = LFWLoader.NUM_IMAGES;
    int batchSize = 2;
    int seed = 123;
    int listenerFreq = 1;

    LFWDataSetIterator lfw = new LFWDataSetIterator(batchSize, numSamples,
            new int[] {numRows, numColumns, numChannels}, outputNum, false, true, 1.0, new Random(seed));

    MultiLayerConfiguration.Builder builder = new NeuralNetConfiguration.Builder().seed(seed)
            .gradientNormalization(GradientNormalization.RenormalizeL2PerLayer)
            .optimizationAlgo(OptimizationAlgorithm.STOCHASTIC_GRADIENT_DESCENT).list()
            .layer(0, new ConvolutionLayer.Builder(5, 5).nIn(numChannels).nOut(6)
                    .weightInit(WeightInit.XAVIER).activation(Activation.RELU).build())
            .layer(1, new SubsamplingLayer.Builder(SubsamplingLayer.PoolingType.MAX, new int[] {2, 2})
                    .stride(1, 1).build())
            .layer(2, new OutputLayer.Builder(LossFunctions.LossFunction.NEGATIVELOGLIKELIHOOD)
                    .nOut(outputNum).weightInit(WeightInit.XAVIER).activation(Activation.SOFTMAX)
                    .build())
            .setInputType(InputType.convolutionalFlat(numRows, numColumns, numChannels));

    MultiLayerNetwork model = new MultiLayerNetwork(builder.build());
    model.init();
    model.setListeners(new ScoreIterationListener(listenerFreq));

    model.fit(lfw.next());

    DataSet dataTest = lfw.next();
    INDArray output = model.output(dataTest.getFeatures());
    Evaluation eval = new Evaluation(outputNum);
    eval.eval(dataTest.getLabels(), output);
    // System.out.println(eval.stats());
}
Example #30
Source File: TestVertxUI.java (from deeplearning4j, Apache License 2.0)
@Test
public void testUI_VAE() throws Exception {
    //Variational autoencoder - for unsupervised layerwise pretraining
    StatsStorage ss = new InMemoryStatsStorage();
    UIServer uiServer = UIServer.getInstance();
    uiServer.attach(ss);

    MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder()
            .optimizationAlgo(OptimizationAlgorithm.STOCHASTIC_GRADIENT_DESCENT)
            .updater(new Sgd(1e-5))
            .list()
            .layer(0, new VariationalAutoencoder.Builder().nIn(4).nOut(3).encoderLayerSizes(10, 11)
                    .decoderLayerSizes(12, 13).weightInit(WeightInit.XAVIER)
                    .pzxActivationFunction(Activation.IDENTITY)
                    .reconstructionDistribution(new GaussianReconstructionDistribution())
                    .activation(Activation.LEAKYRELU).build())
            .layer(1, new VariationalAutoencoder.Builder().nIn(3).nOut(3).encoderLayerSizes(7)
                    .decoderLayerSizes(8).weightInit(WeightInit.XAVIER)
                    .pzxActivationFunction(Activation.IDENTITY)
                    .reconstructionDistribution(new GaussianReconstructionDistribution())
                    .activation(Activation.LEAKYRELU).build())
            .layer(2, new OutputLayer.Builder().nIn(3).nOut(3).build())
            .build();

    MultiLayerNetwork net = new MultiLayerNetwork(conf);
    net.init();
    net.setListeners(new StatsListener(ss), new ScoreIterationListener(1));

    DataSetIterator iter = new IrisDataSetIterator(150, 150);
    for (int i = 0; i < 50; i++) {
        net.fit(iter);
        Thread.sleep(100);
    }
}