org.deeplearning4j.earlystopping.EarlyStoppingConfiguration Java Examples
The following examples show how to use
org.deeplearning4j.earlystopping.EarlyStoppingConfiguration.
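All of the examples below follow the same basic pattern: build an EarlyStoppingConfiguration with one or more termination conditions, a score calculator, and a model saver, then hand it to an early stopping trainer and call fit(). As a quick orientation, here is a minimal sketch of that pattern. It is not taken from any one project: net, trainIter, and validationIter are hypothetical placeholders for your own network and iterators, and the API calls mirror those used in the examples that follow.

    // Minimal sketch of the common pattern (assumes a configured MultiLayerNetwork `net`,
    // a training DataSetIterator `trainIter`, and a validation DataSetIterator `validationIter`):
    EarlyStoppingConfiguration<MultiLayerNetwork> esConf =
            new EarlyStoppingConfiguration.Builder<MultiLayerNetwork>()
                    .epochTerminationConditions(new MaxEpochsTerminationCondition(30),
                            new ScoreImprovementEpochTerminationCondition(5)) //Stop at 30 epochs, or after 5 epochs with no improvement
                    .scoreCalculator(new DataSetLossCalculator(validationIter, true)) //Score = validation set loss
                    .modelSaver(new InMemoryModelSaver<>()) //Keep the best model in memory
                    .build();

    IEarlyStoppingTrainer<MultiLayerNetwork> trainer = new EarlyStoppingTrainer(esConf, net, trainIter);
    EarlyStoppingResult<MultiLayerNetwork> result = trainer.fit();
    System.out.println("Termination reason: " + result.getTerminationReason());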
Example #1
Source File: BaseSparkEarlyStoppingTrainer.java From deeplearning4j with Apache License 2.0
protected BaseSparkEarlyStoppingTrainer(JavaSparkContext sc, EarlyStoppingConfiguration<T> esConfig, T net,
            JavaRDD<DataSet> train, JavaRDD<MultiDataSet> trainMulti, EarlyStoppingListener<T> listener) {
    if ((esConfig.getEpochTerminationConditions() == null || esConfig.getEpochTerminationConditions().isEmpty())
            && (esConfig.getIterationTerminationConditions() == null
                    || esConfig.getIterationTerminationConditions().isEmpty())) {
        throw new IllegalArgumentException(
                "Cannot conduct early stopping without a termination condition (both Iteration "
                        + "and Epoch termination conditions are null/empty)");
    }

    this.sc = sc;
    this.esConfig = esConfig;
    this.net = net;
    this.train = train;
    this.trainMulti = trainMulti;
    this.listener = listener;
}
Example #2
Source File: BaseEarlyStoppingTrainer.java From deeplearning4j with Apache License 2.0
protected BaseEarlyStoppingTrainer(EarlyStoppingConfiguration<T> earlyStoppingConfiguration, T model,
            DataSetIterator train, MultiDataSetIterator trainMulti, EarlyStoppingListener<T> listener) {
    if (train != null && train.asyncSupported()) {
        train = new AsyncDataSetIterator(train);
    }
    if (trainMulti != null && trainMulti.asyncSupported()) {
        trainMulti = new AsyncMultiDataSetIterator(trainMulti);
    }
    this.esConfig = earlyStoppingConfiguration;
    this.model = model;
    this.train = train;
    this.trainMulti = trainMulti;
    this.iterator = (train != null ? train : trainMulti);
    this.listener = listener;
}
Example #3
Source File: TestEarlyStoppingSpark.java From deeplearning4j with Apache License 2.0
@Test
public void testNoImprovementNEpochsTermination() {
    //Idea: terminate training if score (test set loss) does not improve for 5 consecutive epochs
    //Simulate this by setting LR = 0.0
    Nd4j.getRandom().setSeed(12345);
    MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder().seed(12345)
            .optimizationAlgo(OptimizationAlgorithm.STOCHASTIC_GRADIENT_DESCENT)
            .updater(new Sgd(0.0)).weightInit(WeightInit.XAVIER).list()
            .layer(0, new OutputLayer.Builder().nIn(4).nOut(3)
                    .lossFunction(LossFunctions.LossFunction.MCXENT).build())
            .build();
    MultiLayerNetwork net = new MultiLayerNetwork(conf);
    net.setListeners(new ScoreIterationListener(5));

    JavaRDD<DataSet> irisData = getIris();

    EarlyStoppingModelSaver<MultiLayerNetwork> saver = new InMemoryModelSaver<>();
    EarlyStoppingConfiguration<MultiLayerNetwork> esConf =
            new EarlyStoppingConfiguration.Builder<MultiLayerNetwork>()
                    .epochTerminationConditions(new MaxEpochsTerminationCondition(100),
                            new ScoreImprovementEpochTerminationCondition(5))
                    .iterationTerminationConditions(new MaxScoreIterationTerminationCondition(7.5)) //Initial score is ~2.5
                    .scoreCalculator(new SparkDataSetLossCalculator(irisData, true, sc.sc()))
                    .modelSaver(saver).build();

    IEarlyStoppingTrainer<MultiLayerNetwork> trainer = new SparkEarlyStoppingTrainer(getContext().sc(),
            new ParameterAveragingTrainingMaster(true, 4, 1, 150 / 10, 1, 0), esConf, net, irisData);
    EarlyStoppingResult result = trainer.fit();

    //Expect no score change due to 0 LR -> terminate after 6 total epochs
    assertTrue(result.getTotalEpochs() < 12); //Normally expect 6 epochs exactly; get a little more than that here due to rounding + order of operations
    assertEquals(EarlyStoppingResult.TerminationReason.EpochTerminationCondition, result.getTerminationReason());
    String expDetails = new ScoreImprovementEpochTerminationCondition(5).toString();
    assertEquals(expDetails, result.getTerminationDetails());
}
Example #4
Source File: TestEarlyStoppingSparkCompGraph.java From deeplearning4j with Apache License 2.0
@Test
public void testBadTuning() {
    //Test poor tuning (high LR): should terminate on MaxScoreIterationTerminationCondition
    Nd4j.getRandom().setSeed(12345);
    ComputationGraphConfiguration conf = new NeuralNetConfiguration.Builder().seed(12345)
            .optimizationAlgo(OptimizationAlgorithm.STOCHASTIC_GRADIENT_DESCENT)
            .updater(new Sgd(2.0)) //Intentionally huge LR
            .weightInit(WeightInit.XAVIER).graphBuilder().addInputs("in")
            .addLayer("0", new OutputLayer.Builder().nIn(4).nOut(3).activation(Activation.IDENTITY)
                    .lossFunction(LossFunctions.LossFunction.MSE).build(), "in")
            .setOutputs("0").build();
    ComputationGraph net = new ComputationGraph(conf);
    net.setListeners(new ScoreIterationListener(5));

    JavaRDD<DataSet> irisData = getIris();
    EarlyStoppingModelSaver<ComputationGraph> saver = new InMemoryModelSaver<>();
    EarlyStoppingConfiguration<ComputationGraph> esConf =
            new EarlyStoppingConfiguration.Builder<ComputationGraph>()
                    .epochTerminationConditions(new MaxEpochsTerminationCondition(5000))
                    .iterationTerminationConditions(
                            new MaxTimeIterationTerminationCondition(2, TimeUnit.MINUTES),
                            new MaxScoreIterationTerminationCondition(7.5)) //Initial score is ~2.5
                    .scoreCalculator(new SparkLossCalculatorComputationGraph(
                            irisData.map(new DataSetToMultiDataSetFn()), true, sc.sc()))
                    .modelSaver(saver).build();

    TrainingMaster tm = new ParameterAveragingTrainingMaster(true, numExecutors(), 1, 10, 1, 0);
    IEarlyStoppingTrainer<ComputationGraph> trainer = new SparkEarlyStoppingGraphTrainer(getContext().sc(), tm,
            esConf, net, irisData.map(new DataSetToMultiDataSetFn()));
    EarlyStoppingResult result = trainer.fit();

    assertTrue(result.getTotalEpochs() < 5);
    assertEquals(EarlyStoppingResult.TerminationReason.IterationTerminationCondition, result.getTerminationReason());
    String expDetails = new MaxScoreIterationTerminationCondition(7.5).toString();
    assertEquals(expDetails, result.getTerminationDetails());
}
Example #5
Source File: TestEarlyStoppingSparkCompGraph.java From deeplearning4j with Apache License 2.0
@Test
public void testNoImprovementNEpochsTermination() {
    //Idea: terminate training if score (test set loss) does not improve for 5 consecutive epochs
    //Simulate this by setting LR = 0.0
    Nd4j.getRandom().setSeed(12345);
    ComputationGraphConfiguration conf = new NeuralNetConfiguration.Builder().seed(12345)
            .optimizationAlgo(OptimizationAlgorithm.STOCHASTIC_GRADIENT_DESCENT)
            .updater(new Sgd(0.0)).weightInit(WeightInit.XAVIER).graphBuilder()
            .addInputs("in")
            .addLayer("0", new OutputLayer.Builder().nIn(4).nOut(3)
                    .lossFunction(LossFunctions.LossFunction.MCXENT).build(), "in")
            .setOutputs("0").build();
    ComputationGraph net = new ComputationGraph(conf);
    net.setListeners(new ScoreIterationListener(5));

    JavaRDD<DataSet> irisData = getIris();

    EarlyStoppingModelSaver<ComputationGraph> saver = new InMemoryModelSaver<>();
    EarlyStoppingConfiguration<ComputationGraph> esConf =
            new EarlyStoppingConfiguration.Builder<ComputationGraph>()
                    .epochTerminationConditions(new MaxEpochsTerminationCondition(100),
                            new ScoreImprovementEpochTerminationCondition(5))
                    .iterationTerminationConditions(new MaxScoreIterationTerminationCondition(7.5)) //Initial score is ~2.5
                    .scoreCalculator(new SparkLossCalculatorComputationGraph(
                            irisData.map(new DataSetToMultiDataSetFn()), true, sc.sc()))
                    .modelSaver(saver).build();

    TrainingMaster tm = new ParameterAveragingTrainingMaster(true, numExecutors(), 1, 10, 1, 0);
    IEarlyStoppingTrainer<ComputationGraph> trainer = new SparkEarlyStoppingGraphTrainer(getContext().sc(), tm,
            esConf, net, irisData.map(new DataSetToMultiDataSetFn()));
    EarlyStoppingResult result = trainer.fit();

    //Expect no score change due to 0 LR -> terminate after 6 total epochs
    assertTrue(result.getTotalEpochs() < 12); //Normally expect 6 epochs exactly; get a little more than that here due to rounding + order of operations
    assertEquals(EarlyStoppingResult.TerminationReason.EpochTerminationCondition, result.getTerminationReason());
    String expDetails = new ScoreImprovementEpochTerminationCondition(5).toString();
    assertEquals(expDetails, result.getTerminationDetails());
}
Example #6
Source File: TestEarlyStoppingSparkCompGraph.java From deeplearning4j with Apache License 2.0
@Test
public void testListeners() {
    ComputationGraphConfiguration conf = new NeuralNetConfiguration.Builder()
            .optimizationAlgo(OptimizationAlgorithm.STOCHASTIC_GRADIENT_DESCENT)
            .updater(new Sgd()).weightInit(WeightInit.XAVIER).graphBuilder().addInputs("in")
            .addLayer("0", new OutputLayer.Builder().nIn(4).nOut(3)
                    .lossFunction(LossFunctions.LossFunction.MCXENT).build(), "in")
            .setOutputs("0").build();
    ComputationGraph net = new ComputationGraph(conf);
    net.setListeners(new ScoreIterationListener(5));

    JavaRDD<DataSet> irisData = getIris();
    EarlyStoppingModelSaver<ComputationGraph> saver = new InMemoryModelSaver<>();
    EarlyStoppingConfiguration<ComputationGraph> esConf =
            new EarlyStoppingConfiguration.Builder<ComputationGraph>()
                    .epochTerminationConditions(new MaxEpochsTerminationCondition(5))
                    .iterationTerminationConditions(
                            new MaxTimeIterationTerminationCondition(2, TimeUnit.MINUTES))
                    .scoreCalculator(new SparkLossCalculatorComputationGraph(
                            irisData.map(new DataSetToMultiDataSetFn()), true, sc.sc()))
                    .modelSaver(saver).build();

    LoggingEarlyStoppingListener listener = new LoggingEarlyStoppingListener();

    TrainingMaster tm = new ParameterAveragingTrainingMaster(true, numExecutors(), 1, 10, 1, 0);
    IEarlyStoppingTrainer<ComputationGraph> trainer = new SparkEarlyStoppingGraphTrainer(getContext().sc(), tm,
            esConf, net, irisData.map(new DataSetToMultiDataSetFn()));
    trainer.setListener(listener);

    trainer.fit();

    assertEquals(1, listener.onStartCallCount);
    assertEquals(5, listener.onEpochCallCount);
    assertEquals(1, listener.onCompletionCallCount);
}
Example #7
Source File: TestEarlyStoppingSpark.java From deeplearning4j with Apache License 2.0
@Test
public void testBadTuning() {
    //Test poor tuning (high LR): should terminate on MaxScoreIterationTerminationCondition
    Nd4j.getRandom().setSeed(12345);
    MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder().seed(12345)
            .optimizationAlgo(OptimizationAlgorithm.STOCHASTIC_GRADIENT_DESCENT)
            .updater(new Sgd(10.0)) //Intentionally huge LR
            .weightInit(WeightInit.XAVIER).list()
            .layer(0, new OutputLayer.Builder().nIn(4).nOut(3).activation(Activation.IDENTITY)
                    .lossFunction(LossFunctions.LossFunction.MSE).build())
            .build();
    MultiLayerNetwork net = new MultiLayerNetwork(conf);
    net.setListeners(new ScoreIterationListener(5));

    JavaRDD<DataSet> irisData = getIris();
    EarlyStoppingModelSaver<MultiLayerNetwork> saver = new InMemoryModelSaver<>();
    EarlyStoppingConfiguration<MultiLayerNetwork> esConf =
            new EarlyStoppingConfiguration.Builder<MultiLayerNetwork>()
                    .epochTerminationConditions(new MaxEpochsTerminationCondition(5000))
                    .iterationTerminationConditions(
                            new MaxTimeIterationTerminationCondition(2, TimeUnit.MINUTES),
                            new MaxScoreIterationTerminationCondition(7.5)) //Initial score is ~2.5
                    .scoreCalculator(new SparkDataSetLossCalculator(irisData, true, sc.sc()))
                    .modelSaver(saver).build();

    IEarlyStoppingTrainer<MultiLayerNetwork> trainer = new SparkEarlyStoppingTrainer(getContext().sc(),
            new ParameterAveragingTrainingMaster(true, 4, 1, 150 / 4, 1, 0), esConf, net, irisData);
    EarlyStoppingResult result = trainer.fit();

    assertTrue(result.getTotalEpochs() < 5);
    assertEquals(EarlyStoppingResult.TerminationReason.IterationTerminationCondition, result.getTerminationReason());
    String expDetails = new MaxScoreIterationTerminationCondition(7.5).toString();
    assertEquals(expDetails, result.getTerminationDetails());
}
Example #8
Source File: TestParallelEarlyStoppingUI.java From deeplearning4j with Apache License 2.0
@Test
@Ignore //To be run manually
public void testParallelStatsListenerCompatibility() throws Exception {
    UIServer uiServer = UIServer.getInstance();

    MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder()
            .optimizationAlgo(OptimizationAlgorithm.STOCHASTIC_GRADIENT_DESCENT)
            .updater(new Sgd()).weightInit(WeightInit.XAVIER).list()
            .layer(0, new DenseLayer.Builder().nIn(4).nOut(3).build())
            .layer(1, new OutputLayer.Builder().nIn(3).nOut(3)
                    .lossFunction(LossFunctions.LossFunction.MCXENT).build())
            .build();
    MultiLayerNetwork net = new MultiLayerNetwork(conf);

    // it's important that the UI can report results from parallel training
    // there's potential for StatsListener to fail if certain properties aren't set in the model
    StatsStorage statsStorage = new InMemoryStatsStorage();
    net.setListeners(new StatsListener(statsStorage));
    uiServer.attach(statsStorage);

    DataSetIterator irisIter = new IrisDataSetIterator(50, 500);
    EarlyStoppingModelSaver<MultiLayerNetwork> saver = new InMemoryModelSaver<>();
    EarlyStoppingConfiguration<MultiLayerNetwork> esConf =
            new EarlyStoppingConfiguration.Builder<MultiLayerNetwork>()
                    .epochTerminationConditions(new MaxEpochsTerminationCondition(500))
                    .scoreCalculator(new DataSetLossCalculator(irisIter, true))
                    .evaluateEveryNEpochs(2).modelSaver(saver).build();

    IEarlyStoppingTrainer<MultiLayerNetwork> trainer =
            new EarlyStoppingParallelTrainer<>(esConf, net, irisIter, null, 3, 6, 2);

    EarlyStoppingResult<MultiLayerNetwork> result = trainer.fit();
    System.out.println(result);

    assertEquals(EarlyStoppingResult.TerminationReason.EpochTerminationCondition, result.getTerminationReason());
}
Example #9
Source File: TestParallelEarlyStopping.java From deeplearning4j with Apache License 2.0
@Test
public void testBadTuning() {
    //Test poor tuning (high LR): should terminate on MaxScoreIterationTerminationCondition
    Nd4j.getRandom().setSeed(12345);
    MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder().seed(12345)
            .optimizationAlgo(OptimizationAlgorithm.STOCHASTIC_GRADIENT_DESCENT)
            .updater(new Sgd(1.0)) //Intentionally huge LR
            .weightInit(WeightInit.XAVIER).list()
            .layer(0, new OutputLayer.Builder().nIn(4).nOut(3).activation(Activation.SOFTMAX)
                    .lossFunction(LossFunctions.LossFunction.MCXENT).build())
            .build();
    MultiLayerNetwork net = new MultiLayerNetwork(conf);
    net.setListeners(new ScoreIterationListener(1));

    DataSetIterator irisIter = new IrisDataSetIterator(10, 150);
    EarlyStoppingModelSaver<MultiLayerNetwork> saver = new InMemoryModelSaver<>();
    EarlyStoppingConfiguration<MultiLayerNetwork> esConf =
            new EarlyStoppingConfiguration.Builder<MultiLayerNetwork>()
                    .epochTerminationConditions(new MaxEpochsTerminationCondition(5000))
                    .iterationTerminationConditions(
                            new MaxTimeIterationTerminationCondition(1, TimeUnit.MINUTES),
                            new MaxScoreIterationTerminationCondition(10)) //Initial score is ~2.5
                    .scoreCalculator(new DataSetLossCalculator(irisIter, true)).modelSaver(saver)
                    .build();

    IEarlyStoppingTrainer<MultiLayerNetwork> trainer =
            new EarlyStoppingParallelTrainer<>(esConf, net, irisIter, null, 2, 2, 1);
    EarlyStoppingResult result = trainer.fit();

    assertTrue(result.getTotalEpochs() < 5);
    assertEquals(EarlyStoppingResult.TerminationReason.IterationTerminationCondition, result.getTerminationReason());
    String expDetails = new MaxScoreIterationTerminationCondition(10).toString();
    assertEquals(expDetails, result.getTerminationDetails());

    assertTrue(result.getBestModelEpoch() <= 0);
    assertNotNull(result.getBestModel());
}
Example #10
Source File: TestParallelEarlyStopping.java From deeplearning4j with Apache License 2.0
@Test
public void testEarlyStoppingEveryNEpoch() {
    MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder()
            .optimizationAlgo(OptimizationAlgorithm.STOCHASTIC_GRADIENT_DESCENT)
            .updater(new Sgd()).weightInit(WeightInit.XAVIER).list()
            .layer(0, new OutputLayer.Builder().nIn(4).nOut(3)
                    .lossFunction(LossFunctions.LossFunction.MCXENT).activation(Activation.SOFTMAX).build())
            .build();
    MultiLayerNetwork net = new MultiLayerNetwork(conf);
    net.setListeners(new ScoreIterationListener(1));

    DataSetIterator irisIter = new IrisDataSetIterator(50, 600);
    EarlyStoppingModelSaver<MultiLayerNetwork> saver = new InMemoryModelSaver<>();
    EarlyStoppingConfiguration<MultiLayerNetwork> esConf =
            new EarlyStoppingConfiguration.Builder<MultiLayerNetwork>()
                    .epochTerminationConditions(new MaxEpochsTerminationCondition(5))
                    .scoreCalculator(new DataSetLossCalculator(irisIter, true))
                    .evaluateEveryNEpochs(2).modelSaver(saver).build();

    IEarlyStoppingTrainer<MultiLayerNetwork> trainer =
            new EarlyStoppingParallelTrainer<>(esConf, net, irisIter, null, 2, 6, 1);
    EarlyStoppingResult<MultiLayerNetwork> result = trainer.fit();
    System.out.println(result);

    assertEquals(5, result.getTotalEpochs());
    assertEquals(EarlyStoppingResult.TerminationReason.EpochTerminationCondition, result.getTerminationReason());
}
Example #11
Source File: TestEarlyStoppingSpark.java From deeplearning4j with Apache License 2.0
@Test
public void testListeners() {
    MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder()
            .optimizationAlgo(OptimizationAlgorithm.STOCHASTIC_GRADIENT_DESCENT)
            .updater(new Sgd()).weightInit(WeightInit.XAVIER).list()
            .layer(0, new OutputLayer.Builder().nIn(4).nOut(3)
                    .lossFunction(LossFunctions.LossFunction.MCXENT).build())
            .build();
    MultiLayerNetwork net = new MultiLayerNetwork(conf);
    net.setListeners(new ScoreIterationListener(5));

    JavaRDD<DataSet> irisData = getIris();
    EarlyStoppingModelSaver<MultiLayerNetwork> saver = new InMemoryModelSaver<>();
    EarlyStoppingConfiguration<MultiLayerNetwork> esConf =
            new EarlyStoppingConfiguration.Builder<MultiLayerNetwork>()
                    .epochTerminationConditions(new MaxEpochsTerminationCondition(5))
                    .iterationTerminationConditions(
                            new MaxTimeIterationTerminationCondition(2, TimeUnit.MINUTES))
                    .scoreCalculator(new SparkDataSetLossCalculator(irisData, true, sc.sc()))
                    .modelSaver(saver).build();

    LoggingEarlyStoppingListener listener = new LoggingEarlyStoppingListener();

    IEarlyStoppingTrainer<MultiLayerNetwork> trainer = new SparkEarlyStoppingTrainer(getContext().sc(),
            new ParameterAveragingTrainingMaster(true, Runtime.getRuntime().availableProcessors(), 1,
                    10, 1, 0),
            esConf, net, irisData);
    trainer.setListener(listener);

    trainer.fit();

    assertEquals(1, listener.onStartCallCount);
    assertEquals(5, listener.onEpochCallCount);
    assertEquals(1, listener.onCompletionCallCount);
}
Example #12
Source File: EarlyStoppingGraphTrainer.java From deeplearning4j with Apache License 2.0
/**
 * Constructor for training using a {@link DataSetIterator}
 * @param esConfig Configuration
 * @param net Network to train using early stopping
 * @param train DataSetIterator for training the network
 * @param listener Early stopping listener. May be null.
 */
public EarlyStoppingGraphTrainer(EarlyStoppingConfiguration<ComputationGraph> esConfig, ComputationGraph net,
            DataSetIterator train, EarlyStoppingListener<ComputationGraph> listener) {
    super(esConfig, net, train, null, listener);
    if (net.getNumInputArrays() != 1 || net.getNumOutputArrays() != 1)
        throw new IllegalStateException(
                "Cannot do early stopping training on ComputationGraph with DataSetIterator: graph does not have 1 input and 1 output array");
    this.net = net;
}
Example #13
Source File: TestEarlyStoppingSpark.java From deeplearning4j with Apache License 2.0
@Override
public void onStart(EarlyStoppingConfiguration esConfig, MultiLayerNetwork net) {
    log.info("EarlyStopping: onStart called");
    onStartCallCount++;
}
Example #14
Source File: TestEarlyStoppingSpark.java From deeplearning4j with Apache License 2.0
@Override
public void onEpoch(int epochNum, double score, EarlyStoppingConfiguration esConfig, MultiLayerNetwork net) {
    log.info("EarlyStopping: onEpoch called (epochNum={}, score={})", epochNum, score);
    onEpochCallCount++;
}
Example #15
Source File: SparkEarlyStoppingTrainer.java From deeplearning4j with Apache License 2.0
public SparkEarlyStoppingTrainer(JavaSparkContext sc, TrainingMaster trainingMaster,
            EarlyStoppingConfiguration<MultiLayerNetwork> esConfig, MultiLayerNetwork net,
            JavaRDD<DataSet> train) {
    this(sc, trainingMaster, esConfig, net, train, null);
}
Example #16
Source File: MNISTOptimizationTest.java From deeplearning4j with Apache License 2.0
public static void main(String[] args) throws Exception {
    EarlyStoppingConfiguration<MultiLayerNetwork> esConf =
            new EarlyStoppingConfiguration.Builder<MultiLayerNetwork>()
                    .epochTerminationConditions(new MaxEpochsTerminationCondition(3))
                    .iterationTerminationConditions(
                            new MaxTimeIterationTerminationCondition(5, TimeUnit.MINUTES),
                            new MaxScoreIterationTerminationCondition(4.6) //Random score: -log_e(0.1) ~= 2.3
                    )
                    .scoreCalculator(new DataSetLossCalculator(
                            new MnistDataSetIterator(64, 2000, false, false, true, 123), true))
                    .modelSaver(new InMemoryModelSaver()).build();

    //Define: network config (hyperparameter space)
    MultiLayerSpace mls = new MultiLayerSpace.Builder()
            .optimizationAlgo(OptimizationAlgorithm.STOCHASTIC_GRADIENT_DESCENT)
            .updater(new SgdSpace(new ContinuousParameterSpace(0.0001, 0.2)))
            .l2(new ContinuousParameterSpace(0.0001, 0.05))
            .addLayer(new ConvolutionLayerSpace.Builder().nIn(1)
                    .nOut(new IntegerParameterSpace(5, 30))
                    .kernelSize(new DiscreteParameterSpace<>(new int[] {3, 3},
                            new int[] {4, 4}, new int[] {5, 5}))
                    .stride(new DiscreteParameterSpace<>(new int[] {1, 1}, new int[] {2, 2}))
                    .activation(new DiscreteParameterSpace<>(Activation.RELU, Activation.SOFTPLUS,
                            Activation.LEAKYRELU))
                    .build(), new IntegerParameterSpace(1, 2)) //1-2 identical layers
            .addLayer(new DenseLayerSpace.Builder().nIn(4).nOut(new IntegerParameterSpace(2, 10))
                    .activation(new DiscreteParameterSpace<>(Activation.RELU, Activation.TANH))
                    .build(), new IntegerParameterSpace(0, 1)) //0 to 1 layers
            .addLayer(new OutputLayerSpace.Builder().nOut(10).activation(Activation.SOFTMAX)
                    .lossFunction(LossFunctions.LossFunction.MCXENT).build())
            .earlyStoppingConfiguration(esConf).build();

    Map<String, Object> commands = new HashMap<>();
    commands.put(DataSetIteratorFactoryProvider.FACTORY_KEY, TestDataFactoryProviderMnist.class.getCanonicalName());

    //Define configuration:
    CandidateGenerator candidateGenerator = new RandomSearchGenerator(mls, commands);
    DataProvider dataProvider = new MnistDataSetProvider();

    String modelSavePath = new File(System.getProperty("java.io.tmpdir"), "ArbiterMNISTSmall\\").getAbsolutePath();

    File f = new File(modelSavePath);
    if (f.exists())
        f.delete();
    f.mkdir();
    if (!f.exists())
        throw new RuntimeException();

    OptimizationConfiguration configuration = new OptimizationConfiguration.Builder()
            .candidateGenerator(candidateGenerator)
            .dataProvider(dataProvider)
            .modelSaver(new FileModelSaver(modelSavePath)).scoreFunction(new TestSetLossScoreFunction(true))
            .terminationConditions(new MaxTimeCondition(120, TimeUnit.MINUTES),
                    new MaxCandidatesCondition(100))
            .build();

    IOptimizationRunner runner = new LocalOptimizationRunner(configuration, new MultiLayerNetworkTaskCreator());

    //ArbiterUIServer server = ArbiterUIServer.getInstance();
    //runner.addListeners(new UIOptimizationRunnerStatusListener(server));

    runner.execute();

    System.out.println("----- COMPLETE -----");
}
Example #17
Source File: TestDL4JLocalExecution.java From deeplearning4j with Apache License 2.0
@Test
@Ignore
public void testLocalExecutionEarlyStopping() throws Exception {
    EarlyStoppingConfiguration esConf = new EarlyStoppingConfiguration.Builder<MultiLayerNetwork>()
            .epochTerminationConditions(new MaxEpochsTerminationCondition(100))
            .scoreCalculator(new DataSetLossCalculator(new IrisDataSetIterator(150, 150), true))
            .modelSaver(new InMemoryModelSaver()).build();
    Map<String, Object> commands = new HashMap<>();
    commands.put(DataSetIteratorFactoryProvider.FACTORY_KEY, TestDataFactoryProviderMnist.class.getCanonicalName());

    //Define: network config (hyperparameter space)
    MultiLayerSpace mls = new MultiLayerSpace.Builder()
            .optimizationAlgo(OptimizationAlgorithm.STOCHASTIC_GRADIENT_DESCENT)
            .updater(new SgdSpace(new ContinuousParameterSpace(0.0001, 0.1)))
            .l2(new ContinuousParameterSpace(0.0001, 0.01))
            .addLayer(new DenseLayerSpace.Builder().nIn(4).nOut(new IntegerParameterSpace(2, 10))
                    .activation(new DiscreteParameterSpace<>(Activation.RELU, Activation.TANH))
                    .build(), new IntegerParameterSpace(1, 2)) //1-2 identical layers (except nIn)
            .addLayer(new OutputLayerSpace.Builder().nOut(3).activation(Activation.SOFTMAX)
                    .lossFunction(LossFunctions.LossFunction.MCXENT).build())
            .earlyStoppingConfiguration(esConf).build();

    //Define configuration:
    CandidateGenerator candidateGenerator = new RandomSearchGenerator(mls, commands);
    DataProvider dataProvider = new DataSetIteratorFactoryProvider();

    String modelSavePath = new File(System.getProperty("java.io.tmpdir"), "ArbiterDL4JTest2\\").getAbsolutePath();

    File f = new File(modelSavePath);
    if (f.exists())
        f.delete();
    f.mkdir();
    f.deleteOnExit();
    if (!f.exists())
        throw new RuntimeException();

    OptimizationConfiguration configuration = new OptimizationConfiguration.Builder()
            .candidateGenerator(candidateGenerator).dataProvider(dataProvider)
            .modelSaver(new FileModelSaver(modelSavePath)).scoreFunction(new TestSetLossScoreFunction())
            .terminationConditions(new MaxTimeCondition(2, TimeUnit.MINUTES),
                    new MaxCandidatesCondition(100))
            .build();

    IOptimizationRunner runner = new LocalOptimizationRunner(configuration,
            new MultiLayerNetworkTaskCreator(new ClassificationEvaluator()));

    runner.execute();
    System.out.println("----- COMPLETE -----");
}
Example #18
Source File: TestEarlyStoppingSpark.java From deeplearning4j with Apache License 2.0
@Test
public void testTimeTermination() {
    //test termination after max time
    Nd4j.getRandom().setSeed(12345);
    MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder().seed(12345)
            .optimizationAlgo(OptimizationAlgorithm.STOCHASTIC_GRADIENT_DESCENT)
            .updater(new Sgd(1e-6)).weightInit(WeightInit.XAVIER).list()
            .layer(0, new OutputLayer.Builder().nIn(4).nOut(3)
                    .lossFunction(LossFunctions.LossFunction.MCXENT).build())
            .build();
    MultiLayerNetwork net = new MultiLayerNetwork(conf);
    net.setListeners(new ScoreIterationListener(5));

    JavaRDD<DataSet> irisData = getIris();

    EarlyStoppingModelSaver<MultiLayerNetwork> saver = new InMemoryModelSaver<>();
    EarlyStoppingConfiguration<MultiLayerNetwork> esConf =
            new EarlyStoppingConfiguration.Builder<MultiLayerNetwork>()
                    .epochTerminationConditions(new MaxEpochsTerminationCondition(10000))
                    .iterationTerminationConditions(
                            new MaxTimeIterationTerminationCondition(3, TimeUnit.SECONDS),
                            new MaxScoreIterationTerminationCondition(7.5)) //Initial score is ~2.5
                    .scoreCalculator(new SparkDataSetLossCalculator(irisData, true, sc.sc()))
                    .modelSaver(saver).build();

    IEarlyStoppingTrainer<MultiLayerNetwork> trainer = new SparkEarlyStoppingTrainer(getContext().sc(),
            new ParameterAveragingTrainingMaster(true, 4, 1, 150 / 15, 1, 0), esConf, net, irisData);

    long startTime = System.currentTimeMillis();
    EarlyStoppingResult result = trainer.fit();
    long endTime = System.currentTimeMillis();
    int durationSeconds = (int) (endTime - startTime) / 1000;

    assertTrue("durationSeconds = " + durationSeconds, durationSeconds >= 3);
    assertTrue("durationSeconds = " + durationSeconds, durationSeconds <= 20);

    assertEquals(EarlyStoppingResult.TerminationReason.IterationTerminationCondition, result.getTerminationReason());
    String expDetails = new MaxTimeIterationTerminationCondition(3, TimeUnit.SECONDS).toString();
    assertEquals(expDetails, result.getTerminationDetails());
}
Example #19
Source File: TestJson.java From deeplearning4j with Apache License 2.0
@Test
public void testOptimizationFromJson() {
    EarlyStoppingConfiguration<ComputationGraph> esConf =
            new EarlyStoppingConfiguration.Builder<ComputationGraph>()
                    .epochTerminationConditions(new MaxEpochsTerminationCondition(100))
                    .scoreCalculator(new DataSetLossCalculatorCG(new IrisDataSetIterator(150, 150), true))
                    .modelSaver(new InMemoryModelSaver<ComputationGraph>()).build();

    //Define: network config (hyperparameter space)
    ComputationGraphSpace cgs = new ComputationGraphSpace.Builder()
            .optimizationAlgo(OptimizationAlgorithm.STOCHASTIC_GRADIENT_DESCENT)
            .updater(new AdaMaxSpace(new ContinuousParameterSpace(0.0001, 0.1)))
            .l2(new ContinuousParameterSpace(0.0001, 0.01)).addInputs("in")
            .setInputTypes(InputType.feedForward(4))
            .addLayer("first", new DenseLayerSpace.Builder().nIn(4).nOut(new IntegerParameterSpace(2, 10))
                    .activation(new DiscreteParameterSpace<>(Activation.RELU, Activation.TANH))
                    .build(), "in") //1-2 identical layers (except nIn)
            .addLayer("out", new OutputLayerSpace.Builder().nOut(3).activation(Activation.SOFTMAX)
                    .lossFunction(LossFunctions.LossFunction.MCXENT).build(), "first")
            .setOutputs("out").earlyStoppingConfiguration(esConf).build();

    //Define configuration:
    Map<String, Object> commands = new HashMap<>();
    commands.put(DataSetIteratorFactoryProvider.FACTORY_KEY, TestDataFactoryProviderMnist.class.getCanonicalName());

    CandidateGenerator candidateGenerator = new RandomSearchGenerator(cgs, commands);
    DataProvider dataProvider = new DataSetIteratorFactoryProvider();

    OptimizationConfiguration configuration =
            new OptimizationConfiguration.Builder().candidateGenerator(candidateGenerator)
                    .dataProvider(dataProvider).scoreFunction(new TestSetLossScoreFunction())
                    .terminationConditions(new MaxTimeCondition(2, TimeUnit.MINUTES),
                            new MaxCandidatesCondition(100))
                    .build();

    String json = configuration.toJson();
    OptimizationConfiguration loadConf = OptimizationConfiguration.fromJson(json);
    assertEquals(configuration, loadConf);
}
Example #20
Source File: TestEarlyStoppingSparkCompGraph.java From deeplearning4j with Apache License 2.0
@Override
public void onEpoch(int epochNum, double score, EarlyStoppingConfiguration esConfig, ComputationGraph net) {
    log.info("EarlyStopping: onEpoch called (epochNum={}, score={})", epochNum, score);
    onEpochCallCount++;
}
Example #21
Source File: TestEarlyStoppingSparkCompGraph.java From deeplearning4j with Apache License 2.0
@Override
public void onStart(EarlyStoppingConfiguration esConfig, ComputationGraph net) {
    log.info("EarlyStopping: onStart called");
    onStartCallCount++;
}
Example #22
Source File: TestGraphLocalExecution.java From deeplearning4j with Apache License 2.0
@Test
public void testLocalExecutionEarlyStopping() throws Exception {
    EarlyStoppingConfiguration<ComputationGraph> esConf =
            new EarlyStoppingConfiguration.Builder<ComputationGraph>()
                    .epochTerminationConditions(new MaxEpochsTerminationCondition(2))
                    .scoreCalculator(new ScoreProvider())
                    .modelSaver(new InMemoryModelSaver()).build();
    Map<String, Object> commands = new HashMap<>();
    commands.put(DataSetIteratorFactoryProvider.FACTORY_KEY, TestDataFactoryProviderMnist.class.getCanonicalName());

    //Define: network config (hyperparameter space)
    ComputationGraphSpace cgs = new ComputationGraphSpace.Builder()
            .optimizationAlgo(OptimizationAlgorithm.STOCHASTIC_GRADIENT_DESCENT)
            .updater(new AdamSpace(new ContinuousParameterSpace(0.0001, 0.1)))
            .l2(new ContinuousParameterSpace(0.0001, 0.01)).addInputs("in")
            .setInputTypes(InputType.feedForward(784))
            .addLayer("first", new DenseLayerSpace.Builder().nIn(784).nOut(new IntegerParameterSpace(2, 10))
                    .activation(new DiscreteParameterSpace<>(Activation.RELU, Activation.TANH))
                    .build(), "in") //1-2 identical layers (except nIn)
            .addLayer("out", new OutputLayerSpace.Builder().nOut(10).activation(Activation.SOFTMAX)
                    .lossFunction(LossFunctions.LossFunction.MCXENT).build(), "first")
            .setOutputs("out").earlyStoppingConfiguration(esConf).build();

    //Define configuration:
    CandidateGenerator candidateGenerator = new RandomSearchGenerator(cgs, commands);
    DataProvider dataProvider = new DataSetIteratorFactoryProvider();

    String modelSavePath = new File(System.getProperty("java.io.tmpdir"), "ArbiterDL4JTest2CG\\").getAbsolutePath();

    File f = new File(modelSavePath);
    if (f.exists())
        f.delete();
    f.mkdir();
    f.deleteOnExit();
    if (!f.exists())
        throw new RuntimeException();

    OptimizationConfiguration configuration = new OptimizationConfiguration.Builder()
            .candidateGenerator(candidateGenerator)
            .dataProvider(dataProvider)
            .scoreFunction(ScoreFunctions.testSetF1())
            .modelSaver(new FileModelSaver(modelSavePath))
            .terminationConditions(new MaxTimeCondition(15, TimeUnit.SECONDS),
                    new MaxCandidatesCondition(3))
            .build();

    IOptimizationRunner runner = new LocalOptimizationRunner(configuration, new ComputationGraphTaskCreator());
    runner.execute();

    assertEquals(0, runner.numCandidatesFailed());
    assertTrue(runner.numCandidatesCompleted() > 0);
}
Example #23
Source File: LearnDigitsBackprop.java From aifh with Apache License 2.0
/**
 * The main method.
 * @param args Not used.
 */
public static void main(String[] args) {
    try {
        int seed = 43;
        double learningRate = 1e-2;
        int nEpochs = 50;
        int batchSize = 500;

        // Setup training data.
        System.out.println("Please wait, reading MNIST training data.");
        String dir = System.getProperty("user.dir");
        MNISTReader trainingReader = MNIST.loadMNIST(dir, true);
        MNISTReader validationReader = MNIST.loadMNIST(dir, false);

        DataSet trainingSet = trainingReader.getData();
        DataSet validationSet = validationReader.getData();

        DataSetIterator trainSetIterator = new ListDataSetIterator(trainingSet.asList(), batchSize);
        DataSetIterator validationSetIterator = new ListDataSetIterator(validationSet.asList(),
                validationReader.getNumRows());

        System.out.println("Training set size: " + trainingReader.getNumImages());
        System.out.println("Validation set size: " + validationReader.getNumImages());

        System.out.println(trainingSet.get(0).getFeatures().size(1));
        System.out.println(validationSet.get(0).getFeatures().size(1));

        int numInputs = trainingReader.getNumCols() * trainingReader.getNumRows();
        int numOutputs = 10;
        int numHiddenNodes = 200;

        // Create neural network.
        MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder()
                .seed(seed)
                .iterations(1)
                .optimizationAlgo(OptimizationAlgorithm.STOCHASTIC_GRADIENT_DESCENT)
                .learningRate(learningRate)
                .updater(Updater.NESTEROVS).momentum(0.9)
                .regularization(true).dropOut(0.50)
                .list(2)
                .layer(0, new DenseLayer.Builder().nIn(numInputs).nOut(numHiddenNodes)
                        .weightInit(WeightInit.XAVIER)
                        .activation("relu")
                        .build())
                .layer(1, new OutputLayer.Builder(LossFunction.NEGATIVELOGLIKELIHOOD)
                        .weightInit(WeightInit.XAVIER)
                        .activation("softmax")
                        .nIn(numHiddenNodes).nOut(numOutputs).build())
                .pretrain(false).backprop(true).build();

        MultiLayerNetwork model = new MultiLayerNetwork(conf);
        model.init();
        model.setListeners(new ScoreIterationListener(1));

        // Define when we want to stop training.
        EarlyStoppingModelSaver saver = new InMemoryModelSaver();
        EarlyStoppingConfiguration esConf = new EarlyStoppingConfiguration.Builder()
                //.epochTerminationConditions(new MaxEpochsTerminationCondition(10))
                .epochTerminationConditions(new ScoreImprovementEpochTerminationCondition(5))
                .evaluateEveryNEpochs(1)
                .scoreCalculator(new DataSetLossCalculator(validationSetIterator, true)) //Calculate test set score
                .modelSaver(saver)
                .build();
        EarlyStoppingTrainer trainer = new EarlyStoppingTrainer(esConf, conf, trainSetIterator);

        // Train and display result.
        EarlyStoppingResult result = trainer.fit();
        System.out.println("Termination reason: " + result.getTerminationReason());
        System.out.println("Termination details: " + result.getTerminationDetails());
        System.out.println("Total epochs: " + result.getTotalEpochs());
        System.out.println("Best epoch number: " + result.getBestModelEpoch());
        System.out.println("Score at best epoch: " + result.getBestModelScore());

        model = saver.getBestModel();

        // Evaluate
        Evaluation eval = new Evaluation(numOutputs);
        validationSetIterator.reset();

        for (int i = 0; i < validationSet.numExamples(); i++) {
            DataSet t = validationSet.get(i);
            INDArray features = t.getFeatureMatrix();
            INDArray labels = t.getLabels();
            INDArray predicted = model.output(features, false);
            eval.eval(labels, predicted);
        }

        //Print the evaluation statistics
        System.out.println(eval.stats());
    } catch (Exception ex) {
        ex.printStackTrace();
    }
}
Example #24
Source File: TestEarlyStoppingSparkCompGraph.java From deeplearning4j with Apache License 2.0
@Test
public void testTimeTermination() {
    //test termination after max time
    Nd4j.getRandom().setSeed(12345);
    ComputationGraphConfiguration conf = new NeuralNetConfiguration.Builder().seed(12345)
            .optimizationAlgo(OptimizationAlgorithm.STOCHASTIC_GRADIENT_DESCENT)
            .updater(new Sgd(1e-6)).weightInit(WeightInit.XAVIER).graphBuilder()
            .addInputs("in")
            .addLayer("0", new OutputLayer.Builder().nIn(4).nOut(3)
                    .lossFunction(LossFunctions.LossFunction.MCXENT).build(), "in")
            .setOutputs("0").build();
    ComputationGraph net = new ComputationGraph(conf);
    net.setListeners(new ScoreIterationListener(5));

    JavaRDD<DataSet> irisData = getIris();

    EarlyStoppingModelSaver<ComputationGraph> saver = new InMemoryModelSaver<>();
    EarlyStoppingConfiguration<ComputationGraph> esConf =
            new EarlyStoppingConfiguration.Builder<ComputationGraph>()
                    .epochTerminationConditions(new MaxEpochsTerminationCondition(10000))
                    .iterationTerminationConditions(
                            new MaxTimeIterationTerminationCondition(3, TimeUnit.SECONDS),
                            new MaxScoreIterationTerminationCondition(7.5)) //Initial score is ~2.5
                    .scoreCalculator(new SparkLossCalculatorComputationGraph(
                            irisData.map(new DataSetToMultiDataSetFn()), true, sc.sc()))
                    .modelSaver(saver).build();

    TrainingMaster tm = new ParameterAveragingTrainingMaster(true, numExecutors(), 1, 10, 1, 0);
    IEarlyStoppingTrainer<ComputationGraph> trainer = new SparkEarlyStoppingGraphTrainer(getContext().sc(), tm,
            esConf, net, irisData.map(new DataSetToMultiDataSetFn()));

    long startTime = System.currentTimeMillis();
    EarlyStoppingResult result = trainer.fit();
    long endTime = System.currentTimeMillis();
    int durationSeconds = (int) (endTime - startTime) / 1000;

    assertTrue(durationSeconds >= 3);
    assertTrue(durationSeconds <= 20);

    assertEquals(EarlyStoppingResult.TerminationReason.IterationTerminationCondition, result.getTerminationReason());
    String expDetails = new MaxTimeIterationTerminationCondition(3, TimeUnit.SECONDS).toString();
    assertEquals(expDetails, result.getTerminationDetails());
}
Example #25
Source File: LearnDigitsDropout.java From aifh with Apache License 2.0
/**
 * The main method.
 * @param args Not used.
 */
public static void main(String[] args) {
    try {
        int seed = 43;
        double learningRate = 1e-2;
        int nEpochs = 50;
        int batchSize = 500;

        // Setup training data.
        System.out.println("Please wait, reading MNIST training data.");
        String dir = System.getProperty("user.dir");
        MNISTReader trainingReader = MNIST.loadMNIST(dir, true);
        MNISTReader validationReader = MNIST.loadMNIST(dir, false);

        DataSet trainingSet = trainingReader.getData();
        DataSet validationSet = validationReader.getData();

        DataSetIterator trainSetIterator = new ListDataSetIterator(trainingSet.asList(), batchSize);
        DataSetIterator validationSetIterator = new ListDataSetIterator(validationSet.asList(),
                validationReader.getNumRows());

        System.out.println("Training set size: " + trainingReader.getNumImages());
        System.out.println("Validation set size: " + validationReader.getNumImages());

        System.out.println(trainingSet.get(0).getFeatures().size(1));
        System.out.println(validationSet.get(0).getFeatures().size(1));

        int numInputs = trainingReader.getNumCols() * trainingReader.getNumRows();
        int numOutputs = 10;
        int numHiddenNodes = 100;

        // Create neural network.
        MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder()
                .seed(seed)
                .iterations(1)
                .optimizationAlgo(OptimizationAlgorithm.STOCHASTIC_GRADIENT_DESCENT)
                .learningRate(learningRate)
                .updater(Updater.NESTEROVS).momentum(0.9)
                .list(2)
                .layer(0, new DenseLayer.Builder().nIn(numInputs).nOut(numHiddenNodes)
                        .weightInit(WeightInit.XAVIER)
                        .activation("relu")
                        .build())
                .layer(1, new OutputLayer.Builder(LossFunction.NEGATIVELOGLIKELIHOOD)
                        .weightInit(WeightInit.XAVIER)
                        .activation("softmax")
                        .nIn(numHiddenNodes).nOut(numOutputs).build())
                .pretrain(false).backprop(true).build();

        MultiLayerNetwork model = new MultiLayerNetwork(conf);
        model.init();
        model.setListeners(new ScoreIterationListener(1));

        // Define when we want to stop training.
        EarlyStoppingModelSaver saver = new InMemoryModelSaver();
        EarlyStoppingConfiguration esConf = new EarlyStoppingConfiguration.Builder()
                //.epochTerminationConditions(new MaxEpochsTerminationCondition(10))
                .epochTerminationConditions(new ScoreImprovementEpochTerminationCondition(5))
                .evaluateEveryNEpochs(1)
                .scoreCalculator(new DataSetLossCalculator(validationSetIterator, true)) //Calculate test set score
                .modelSaver(saver)
                .build();
        EarlyStoppingTrainer trainer = new EarlyStoppingTrainer(esConf, conf, trainSetIterator);

        // Train and display result.
        EarlyStoppingResult result = trainer.fit();
        System.out.println("Termination reason: " + result.getTerminationReason());
        System.out.println("Termination details: " + result.getTerminationDetails());
        System.out.println("Total epochs: " + result.getTotalEpochs());
        System.out.println("Best epoch number: " + result.getBestModelEpoch());
        System.out.println("Score at best epoch: " + result.getBestModelScore());

        model = saver.getBestModel();

        // Evaluate
        Evaluation eval = new Evaluation(numOutputs);
        validationSetIterator.reset();

        for (int i = 0; i < validationSet.numExamples(); i++) {
            DataSet t = validationSet.get(i);
            INDArray features = t.getFeatureMatrix();
            INDArray labels = t.getLabels();
            INDArray predicted = model.output(features, false);
            eval.eval(labels, predicted);
        }

        //Print the evaluation statistics
        System.out.println(eval.stats());
    } catch (Exception ex) {
        ex.printStackTrace();
    }
}
Example #26
Source File: LearnIrisBackprop.java From aifh with Apache License 2.0
/**
 * The main method.
 * @param args Not used.
 */
public static void main(String[] args) {
    try {
        int seed = 43;
        double learningRate = 0.1;
        int splitTrainNum = (int) (150 * .75);
        int numInputs = 4;
        int numOutputs = 3;
        int numHiddenNodes = 50;

        // Setup training data.
        final InputStream istream = LearnIrisBackprop.class.getResourceAsStream("/iris.csv");
        if (istream == null) {
            System.out.println("Cannot access data set, make sure the resources are available.");
            System.exit(1);
        }
        final NormalizeDataSet ds = NormalizeDataSet.load(istream);
        final CategoryMap species = ds.encodeOneOfN(4); // species is column 4
        istream.close();

        DataSet next = ds.extractSupervised(0, 4, 4, 3);
        next.shuffle();

        // Training and validation data split
        SplitTestAndTrain testAndTrain = next.splitTestAndTrain(splitTrainNum, new Random(seed));
        DataSet trainSet = testAndTrain.getTrain();
        DataSet validationSet = testAndTrain.getTest();

        DataSetIterator trainSetIterator = new ListDataSetIterator(trainSet.asList(), trainSet.numExamples());
        DataSetIterator validationSetIterator = new ListDataSetIterator(validationSet.asList(),
                validationSet.numExamples());

        // Create neural network.
        MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder()
                .seed(seed)
                .iterations(1)
                .optimizationAlgo(OptimizationAlgorithm.STOCHASTIC_GRADIENT_DESCENT)
                .learningRate(learningRate)
                .updater(Updater.NESTEROVS).momentum(0.9)
                .list(2)
                .layer(0, new DenseLayer.Builder().nIn(numInputs).nOut(numHiddenNodes)
                        .weightInit(WeightInit.XAVIER)
                        .activation("relu")
                        .build())
                .layer(1, new OutputLayer.Builder(LossFunction.NEGATIVELOGLIKELIHOOD)
                        .weightInit(WeightInit.XAVIER)
                        .activation("softmax")
                        .nIn(numHiddenNodes).nOut(numOutputs).build())
                .pretrain(false).backprop(true).build();

        MultiLayerNetwork model = new MultiLayerNetwork(conf);
        model.init();
        model.setListeners(new ScoreIterationListener(1));

        // Define when we want to stop training.
        // Note: both conditions are passed in a single call; calling
        // epochTerminationConditions() twice would replace the first condition.
        EarlyStoppingModelSaver saver = new InMemoryModelSaver();
        EarlyStoppingConfiguration esConf = new EarlyStoppingConfiguration.Builder()
                .epochTerminationConditions(new MaxEpochsTerminationCondition(500), //Max of 500 epochs
                        new ScoreImprovementEpochTerminationCondition(25))
                .evaluateEveryNEpochs(1)
                .scoreCalculator(new DataSetLossCalculator(validationSetIterator, true)) //Calculate test set score
                .modelSaver(saver)
                .build();
        EarlyStoppingTrainer trainer = new EarlyStoppingTrainer(esConf, conf, trainSetIterator);

        // Train and display result.
        EarlyStoppingResult result = trainer.fit();
        System.out.println("Termination reason: " + result.getTerminationReason());
        System.out.println("Termination details: " + result.getTerminationDetails());
        System.out.println("Total epochs: " + result.getTotalEpochs());
        System.out.println("Best epoch number: " + result.getBestModelEpoch());
        System.out.println("Score at best epoch: " + result.getBestModelScore());

        model = saver.getBestModel();

        // Evaluate
        Evaluation eval = new Evaluation(numOutputs);
        validationSetIterator.reset();

        for (int i = 0; i < validationSet.numExamples(); i++) {
            DataSet t = validationSet.get(i);
            INDArray features = t.getFeatureMatrix();
            INDArray labels = t.getLabels();
            INDArray predicted = model.output(features, false);
            System.out.println(features + ":Prediction(" + findSpecies(predicted, species)
                    + "):Actual(" + findSpecies(labels, species) + ")" + predicted);
            eval.eval(labels, predicted);
        }

        //Print the evaluation statistics
        System.out.println(eval.stats());
    } catch (Exception ex) {
        ex.printStackTrace();
    }
}
Example #27
Source File: EarlyStoppingParallelTrainer.java From deeplearning4j with Apache License 2.0
public EarlyStoppingParallelTrainer(EarlyStoppingConfiguration<T> earlyStoppingConfiguration, T model,
            DataSetIterator train, MultiDataSetIterator trainMulti, int workers, int prefetchBuffer,
            int averagingFrequency) {
    this(earlyStoppingConfiguration, model, train, trainMulti, null, workers, prefetchBuffer,
            averagingFrequency, true, true);
}
Example #28
Source File: Trainer.java From dl4j-quickstart with Apache License 2.0
public static void main(String... args) throws java.io.IOException {
    // create the data iterators for emnist
    DataSetIterator emnistTrain = new EmnistDataSetIterator(emnistSet, batchSize, true);
    DataSetIterator emnistTest = new EmnistDataSetIterator(emnistSet, batchSize, false);

    int outputNum = EmnistDataSetIterator.numLabels(emnistSet);

    // network configuration (not yet initialized)
    MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder()
            .seed(rngSeed)
            .optimizationAlgo(OptimizationAlgorithm.STOCHASTIC_GRADIENT_DESCENT)
            .updater(new Adam())
            .l2(1e-4)
            .list()
            .layer(new DenseLayer.Builder()
                    .nIn(numRows * numColumns) // Number of input datapoints.
                    .nOut(1000) // Number of output datapoints.
                    .activation(Activation.RELU) // Activation function.
                    .weightInit(WeightInit.XAVIER) // Weight initialization.
                    .build())
            .layer(new OutputLayer.Builder(LossFunctions.LossFunction.NEGATIVELOGLIKELIHOOD)
                    .nIn(1000)
                    .nOut(outputNum)
                    .activation(Activation.SOFTMAX)
                    .weightInit(WeightInit.XAVIER)
                    .build())
            .pretrain(false).backprop(true)
            .build();

    // create the MLN
    MultiLayerNetwork network = new MultiLayerNetwork(conf);
    network.init();

    // pass a training listener that reports score every N iterations
    network.addListeners(new ScoreIterationListener(reportingInterval));

    // here we set up an early stopping trainer
    // early stopping is useful when your trainer runs for
    // a long time or you need to programmatically stop training
    EarlyStoppingConfiguration esConf = new EarlyStoppingConfiguration.Builder()
            .epochTerminationConditions(new MaxEpochsTerminationCondition(5))
            .iterationTerminationConditions(new MaxTimeIterationTerminationCondition(20, TimeUnit.MINUTES))
            .scoreCalculator(new DataSetLossCalculator(emnistTest, true))
            .evaluateEveryNEpochs(1)
            .modelSaver(new LocalFileModelSaver(System.getProperty("user.dir")))
            .build();

    // training
    EarlyStoppingTrainer trainer = new EarlyStoppingTrainer(esConf, network, emnistTrain);
    EarlyStoppingResult result = trainer.fit();

    // print out early stopping results
    System.out.println("Termination reason: " + result.getTerminationReason());
    System.out.println("Termination details: " + result.getTerminationDetails());
    System.out.println("Total epochs: " + result.getTotalEpochs());
    System.out.println("Best epoch number: " + result.getBestModelEpoch());
    System.out.println("Score at best epoch: " + result.getBestModelScore());

    // evaluate basic performance
    Evaluation eval = network.evaluate(emnistTest);
    System.out.println(eval.accuracy());
    System.out.println(eval.precision());
    System.out.println(eval.recall());

    // evaluate ROC and calculate the Area Under Curve
    ROCMultiClass roc = network.evaluateROCMultiClass(emnistTest);
    System.out.println(roc.calculateAverageAUC());

    // calculate AUC for a single class
    int classIndex = 0;
    System.out.println(roc.calculateAUC(classIndex));

    // optionally, you can print all stats from the evaluations
    System.out.println(eval.stats());
    System.out.println(roc.stats());
}
Example #29
Source File: EarlyStoppingTrainer.java From deeplearning4j with Apache License 2.0
public EarlyStoppingTrainer(EarlyStoppingConfiguration<MultiLayerNetwork> earlyStoppingConfiguration,
            MultiLayerConfiguration configuration, DataSetIterator train) {
    this(earlyStoppingConfiguration, new MultiLayerNetwork(configuration), train);
    net.init();
}
Example #30
Source File: EarlyStoppingTrainer.java From deeplearning4j with Apache License 2.0
public EarlyStoppingTrainer(EarlyStoppingConfiguration<MultiLayerNetwork> esConfig, MultiLayerNetwork net,
            DataSetIterator train) {
    this(esConfig, net, train, null);
}