Java Code Examples for org.deeplearning4j.nn.graph.ComputationGraph#setListeners()
The following examples show how to use org.deeplearning4j.nn.graph.ComputationGraph#setListeners().
Each example is drawn from an open-source project; the source file and license are noted above it.
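Before the individual examples, it helps to see the pattern they all share: build a ComputationGraphConfiguration, construct and init() the graph, register one or more listeners via setListeners(), then train. Below is a minimal sketch of that pattern assembled from the calls used on this page (ScoreIterationListener, StatsListener backed by an InMemoryStatsStorage, the training UI, and the Iris iterator). Imports are omitted to match the examples' style; the listener frequency and epoch count are arbitrary illustrative values, and the two-argument fit(iterator, numEpochs) overload assumes a recent DL4J version (Example 15 uses it).

// Minimal sketch: configure -> init() -> setListeners() -> fit().
ComputationGraphConfiguration conf = new NeuralNetConfiguration.Builder().graphBuilder()
        .addInputs("in")
        .addLayer("out", new OutputLayer.Builder()
                .nIn(4).nOut(3)                                  // Iris: 4 features, 3 classes
                .activation(Activation.SOFTMAX)
                .lossFunction(LossFunctions.LossFunction.MCXENT).build(), "in")
        .setOutputs("out")
        .build();
ComputationGraph net = new ComputationGraph(conf);
net.init();

StatsStorage ss = new InMemoryStatsStorage();            // backing store for the training UI
net.setListeners(new ScoreIterationListener(10),         // log the score every 10 iterations
                 new StatsListener(ss));                 // stream training stats to the storage
UIServer.getInstance().attach(ss);                       // UI served at http://localhost:9000 by default

net.fit(new IrisDataSetIterator(150, 150), 5);           // listeners fire as training runs

Note that setListeners(...) replaces any previously registered listeners rather than appending to them (addListeners(...) appends); Example 16 below relies on this to switch StatsStorage instances partway through training by calling setListeners() a second time.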
Example 1
Source File: TestListeners.java From deeplearning4j with Apache License 2.0
@Test
public void testListenersViaModelGraph() {
    TestListener.clearCounts();
    ComputationGraphConfiguration conf = new NeuralNetConfiguration.Builder().graphBuilder()
                    .addInputs("in")
                    .addLayer("0", new OutputLayer.Builder(LossFunctions.LossFunction.MSE).nIn(10).nOut(10)
                                    .activation(Activation.TANH).build(), "in")
                    .setOutputs("0").build();
    ComputationGraph model = new ComputationGraph(conf);
    model.init();

    StatsStorage ss = new InMemoryStatsStorage();
    model.setListeners(new TestListener(), new StatsListener(ss));

    testListenersForModel(model, null);

    assertEquals(1, ss.listSessionIDs().size());
    assertEquals(2, ss.listWorkerIDsForSession(ss.listSessionIDs().get(0)).size());
}
Example 2
Source File: TestVertxUI.java From deeplearning4j with Apache License 2.0
@Test
public void testUICompGraph() {
    StatsStorage ss = new InMemoryStatsStorage();

    UIServer uiServer = UIServer.getInstance();
    uiServer.attach(ss);

    ComputationGraphConfiguration conf = new NeuralNetConfiguration.Builder().graphBuilder().addInputs("in")
                    .addLayer("L0", new DenseLayer.Builder().activation(Activation.TANH).nIn(4).nOut(4).build(), "in")
                    .addLayer("L1", new OutputLayer.Builder().lossFunction(LossFunctions.LossFunction.MCXENT)
                                    .activation(Activation.SOFTMAX).nIn(4).nOut(3).build(), "L0")
                    .setOutputs("L1").build();

    ComputationGraph net = new ComputationGraph(conf);
    net.init();

    net.setListeners(new StatsListener(ss), new ScoreIterationListener(1));

    DataSetIterator iter = new IrisDataSetIterator(150, 150);

    for (int i = 0; i < 100; i++) {
        net.fit(iter);
    }
}
Example 3
Source File: CenterLossOutputLayerTest.java From deeplearning4j with Apache License 2.0
@Test
@Ignore //Should be run manually
public void testMNISTConfig() throws Exception {
    int batchSize = 64; // Test batch size
    DataSetIterator mnistTrain = new MnistDataSetIterator(batchSize, true, 12345);

    ComputationGraph net = getCNNMnistConfig();
    net.init();
    net.setListeners(new ScoreIterationListener(1));

    for (int i = 0; i < 50; i++) {
        net.fit(mnistTrain.next());
        Thread.sleep(1000);
    }

    Thread.sleep(100000);
}
Example 4
Source File: WorkspaceTests.java From deeplearning4j with Apache License 2.0
@Test
public void testClearing() {
    for (WorkspaceMode wsm : WorkspaceMode.values()) {
        ComputationGraphConfiguration config = new NeuralNetConfiguration.Builder()
                .updater(new Adam())
                .inferenceWorkspaceMode(wsm)
                .trainingWorkspaceMode(wsm)
                .graphBuilder()
                .addInputs("in")
                .setInputTypes(InputType.recurrent(200))
                .addLayer("embeddings", new EmbeddingLayer.Builder().nIn(200).nOut(50).build(), "in")
                .addLayer("a", new GravesLSTM.Builder().nOut(300).activation(Activation.HARDTANH).build(), "embeddings")
                .addVertex("b", new LastTimeStepVertex("in"), "a")
                .addLayer("c", new DenseLayer.Builder().nOut(300).activation(Activation.HARDTANH).build(), "b")
                .addLayer("output", new LossLayer.Builder().lossFunction(LossFunctions.LossFunction.COSINE_PROXIMITY).build(), "c")
                .setOutputs("output")
                .build();

        final ComputationGraph computationGraph = new ComputationGraph(config);
        computationGraph.init();

        computationGraph.setListeners(new ScoreIterationListener(3));

        WSTestDataSetIterator iterator = new WSTestDataSetIterator();
        computationGraph.fit(iterator);
    }
}
Example 5
Source File: TestEarlyStoppingSparkCompGraph.java From deeplearning4j with Apache License 2.0
@Test
public void testNoImprovementNEpochsTermination() {
    //Idea: terminate training if score (test set loss) does not improve for 5 consecutive epochs
    //Simulate this by setting LR = 0.0
    Nd4j.getRandom().setSeed(12345);
    ComputationGraphConfiguration conf = new NeuralNetConfiguration.Builder().seed(12345)
                    .optimizationAlgo(OptimizationAlgorithm.STOCHASTIC_GRADIENT_DESCENT)
                    .updater(new Sgd(0.0)).weightInit(WeightInit.XAVIER).graphBuilder()
                    .addInputs("in")
                    .addLayer("0", new OutputLayer.Builder().nIn(4).nOut(3)
                                    .lossFunction(LossFunctions.LossFunction.MCXENT).build(), "in")
                    .setOutputs("0").build();
    ComputationGraph net = new ComputationGraph(conf);
    net.setListeners(new ScoreIterationListener(5));

    JavaRDD<DataSet> irisData = getIris();

    EarlyStoppingModelSaver<ComputationGraph> saver = new InMemoryModelSaver<>();
    EarlyStoppingConfiguration<ComputationGraph> esConf =
                    new EarlyStoppingConfiguration.Builder<ComputationGraph>()
                                    .epochTerminationConditions(new MaxEpochsTerminationCondition(100),
                                                    new ScoreImprovementEpochTerminationCondition(5))
                                    .iterationTerminationConditions(new MaxScoreIterationTerminationCondition(7.5)) //Initial score is ~2.5
                                    .scoreCalculator(new SparkLossCalculatorComputationGraph(
                                                    irisData.map(new DataSetToMultiDataSetFn()), true, sc.sc()))
                                    .modelSaver(saver).build();

    TrainingMaster tm = new ParameterAveragingTrainingMaster(true, numExecutors(), 1, 10, 1, 0);

    IEarlyStoppingTrainer<ComputationGraph> trainer = new SparkEarlyStoppingGraphTrainer(getContext().sc(), tm,
                    esConf, net, irisData.map(new DataSetToMultiDataSetFn()));
    EarlyStoppingResult result = trainer.fit();

    //Expect no score change due to 0 LR -> terminate after 6 total epochs
    assertTrue(result.getTotalEpochs() < 12); //Normally expect 6 epochs exactly; get a little more than that here due to rounding + order of operations
    assertEquals(EarlyStoppingResult.TerminationReason.EpochTerminationCondition, result.getTerminationReason());
    String expDetails = new ScoreImprovementEpochTerminationCondition(5).toString();
    assertEquals(expDetails, result.getTerminationDetails());
}
Example 6
Source File: TestEarlyStoppingSparkCompGraph.java From deeplearning4j with Apache License 2.0
@Test
public void testBadTuning() {
    //Test poor tuning (high LR): should terminate on MaxScoreIterationTerminationCondition
    Nd4j.getRandom().setSeed(12345);
    ComputationGraphConfiguration conf = new NeuralNetConfiguration.Builder().seed(12345)
                    .optimizationAlgo(OptimizationAlgorithm.STOCHASTIC_GRADIENT_DESCENT)
                    .updater(new Sgd(2.0)) //Intentionally huge LR
                    .weightInit(WeightInit.XAVIER).graphBuilder().addInputs("in")
                    .addLayer("0", new OutputLayer.Builder().nIn(4).nOut(3).activation(Activation.IDENTITY)
                                    .lossFunction(LossFunctions.LossFunction.MSE).build(), "in")
                    .setOutputs("0").build();
    ComputationGraph net = new ComputationGraph(conf);
    net.setListeners(new ScoreIterationListener(5));

    JavaRDD<DataSet> irisData = getIris();

    EarlyStoppingModelSaver<ComputationGraph> saver = new InMemoryModelSaver<>();
    EarlyStoppingConfiguration<ComputationGraph> esConf =
                    new EarlyStoppingConfiguration.Builder<ComputationGraph>()
                                    .epochTerminationConditions(new MaxEpochsTerminationCondition(5000))
                                    .iterationTerminationConditions(new MaxTimeIterationTerminationCondition(2, TimeUnit.MINUTES),
                                                    new MaxScoreIterationTerminationCondition(7.5)) //Initial score is ~2.5
                                    .scoreCalculator(new SparkLossCalculatorComputationGraph(
                                                    irisData.map(new DataSetToMultiDataSetFn()), true, sc.sc()))
                                    .modelSaver(saver).build();

    TrainingMaster tm = new ParameterAveragingTrainingMaster(true, numExecutors(), 1, 10, 1, 0);

    IEarlyStoppingTrainer<ComputationGraph> trainer = new SparkEarlyStoppingGraphTrainer(getContext().sc(), tm,
                    esConf, net, irisData.map(new DataSetToMultiDataSetFn()));
    EarlyStoppingResult result = trainer.fit();

    assertTrue(result.getTotalEpochs() < 5);
    assertEquals(EarlyStoppingResult.TerminationReason.IterationTerminationCondition, result.getTerminationReason());
    String expDetails = new MaxScoreIterationTerminationCondition(7.5).toString();
    assertEquals(expDetails, result.getTerminationDetails());
}
Example 7
Source File: TestEarlyStoppingSparkCompGraph.java From deeplearning4j with Apache License 2.0
@Test
public void testListeners() {
    ComputationGraphConfiguration conf = new NeuralNetConfiguration.Builder()
                    .optimizationAlgo(OptimizationAlgorithm.STOCHASTIC_GRADIENT_DESCENT)
                    .updater(new Sgd()).weightInit(WeightInit.XAVIER).graphBuilder().addInputs("in")
                    .addLayer("0", new OutputLayer.Builder().nIn(4).nOut(3)
                                    .lossFunction(LossFunctions.LossFunction.MCXENT).build(), "in")
                    .setOutputs("0").build();
    ComputationGraph net = new ComputationGraph(conf);
    net.setListeners(new ScoreIterationListener(5));

    JavaRDD<DataSet> irisData = getIris();

    EarlyStoppingModelSaver<ComputationGraph> saver = new InMemoryModelSaver<>();
    EarlyStoppingConfiguration<ComputationGraph> esConf =
                    new EarlyStoppingConfiguration.Builder<ComputationGraph>()
                                    .epochTerminationConditions(new MaxEpochsTerminationCondition(5))
                                    .iterationTerminationConditions(new MaxTimeIterationTerminationCondition(2, TimeUnit.MINUTES))
                                    .scoreCalculator(new SparkLossCalculatorComputationGraph(
                                                    irisData.map(new DataSetToMultiDataSetFn()), true, sc.sc()))
                                    .modelSaver(saver).build();

    LoggingEarlyStoppingListener listener = new LoggingEarlyStoppingListener();

    TrainingMaster tm = new ParameterAveragingTrainingMaster(true, numExecutors(), 1, 10, 1, 0);

    IEarlyStoppingTrainer<ComputationGraph> trainer = new SparkEarlyStoppingGraphTrainer(getContext().sc(), tm,
                    esConf, net, irisData.map(new DataSetToMultiDataSetFn()));
    trainer.setListener(listener);

    trainer.fit();

    assertEquals(1, listener.onStartCallCount);
    assertEquals(5, listener.onEpochCallCount);
    assertEquals(1, listener.onCompletionCallCount);
}
Example 8
Source File: TestEarlyStoppingCompGraph.java From deeplearning4j with Apache License 2.0
@Test
public void testEarlyStoppingListenersCG() {
    ComputationGraphConfiguration conf = new NeuralNetConfiguration.Builder()
            .updater(new Sgd(0.001)).weightInit(WeightInit.XAVIER)
            .graphBuilder()
            .addInputs("in")
            .layer("0", new OutputLayer.Builder().nIn(4).nOut(3)
                    .activation(Activation.SOFTMAX)
                    .lossFunction(LossFunctions.LossFunction.MCXENT).build(), "in")
            .setOutputs("0")
            .build();
    ComputationGraph net = new ComputationGraph(conf);

    TestEarlyStopping.TestListener tl = new TestEarlyStopping.TestListener();
    net.setListeners(tl);

    DataSetIterator irisIter = new IrisDataSetIterator(50, 150);
    EarlyStoppingModelSaver<ComputationGraph> saver = new InMemoryModelSaver<>();
    EarlyStoppingConfiguration<ComputationGraph> esConf =
            new EarlyStoppingConfiguration.Builder<ComputationGraph>()
                    .epochTerminationConditions(new MaxEpochsTerminationCondition(5))
                    .iterationTerminationConditions(
                            new MaxTimeIterationTerminationCondition(1, TimeUnit.MINUTES))
                    .scoreCalculator(new DataSetLossCalculator(irisIter, true)).modelSaver(saver)
                    .build();

    IEarlyStoppingTrainer<ComputationGraph> trainer = new EarlyStoppingGraphTrainer(esConf, net, irisIter);
    trainer.fit();

    assertEquals(5, tl.getCountEpochStart());
    assertEquals(5, tl.getCountEpochEnd());
    assertEquals(5 * 150 / 50, tl.getIterCount());

    assertEquals(4, tl.getMaxEpochStart());
    assertEquals(4, tl.getMaxEpochEnd());
}
Example 9
Source File: TestEarlyStoppingCompGraph.java From deeplearning4j with Apache License 2.0
@Test
public void testListeners() {
    ComputationGraphConfiguration conf = new NeuralNetConfiguration.Builder()
                    .optimizationAlgo(OptimizationAlgorithm.STOCHASTIC_GRADIENT_DESCENT)
                    .updater(new Sgd(0.001)).weightInit(WeightInit.XAVIER).graphBuilder().addInputs("in")
                    .addLayer("0", new OutputLayer.Builder().nIn(4).nOut(3)
                                    .activation(Activation.SOFTMAX)
                                    .lossFunction(LossFunctions.LossFunction.MCXENT).build(), "in")
                    .setOutputs("0").build();
    ComputationGraph net = new ComputationGraph(conf);
    net.setListeners(new ScoreIterationListener(1));

    DataSetIterator irisIter = new IrisDataSetIterator(150, 150);
    EarlyStoppingModelSaver<ComputationGraph> saver = new InMemoryModelSaver<>();
    EarlyStoppingConfiguration<ComputationGraph> esConf =
                    new EarlyStoppingConfiguration.Builder<ComputationGraph>()
                                    .epochTerminationConditions(new MaxEpochsTerminationCondition(5))
                                    .iterationTerminationConditions(new MaxTimeIterationTerminationCondition(1, TimeUnit.MINUTES))
                                    .scoreCalculator(new DataSetLossCalculatorCG(irisIter, true)).modelSaver(saver).build();

    LoggingEarlyStoppingListener listener = new LoggingEarlyStoppingListener();

    IEarlyStoppingTrainer trainer = new EarlyStoppingGraphTrainer(esConf, net, irisIter, listener);

    trainer.fit();

    assertEquals(1, listener.onStartCallCount);
    assertEquals(5, listener.onEpochCallCount);
    assertEquals(1, listener.onCompletionCallCount);
}
Example 10
Source File: TestEarlyStoppingCompGraph.java From deeplearning4j with Apache License 2.0
@Test
public void testNoImprovementNEpochsTermination() {
    //Idea: terminate training if score (test set loss) does not improve for 5 consecutive epochs
    //Simulate this by setting LR = 0.0
    Nd4j.getRandom().setSeed(12345);
    ComputationGraphConfiguration conf = new NeuralNetConfiguration.Builder().seed(12345)
                    .optimizationAlgo(OptimizationAlgorithm.STOCHASTIC_GRADIENT_DESCENT)
                    .updater(new Sgd(0.0)).weightInit(WeightInit.XAVIER).graphBuilder()
                    .addInputs("in")
                    .addLayer("0", new OutputLayer.Builder().nIn(4).nOut(3)
                                    .activation(Activation.SOFTMAX)
                                    .lossFunction(LossFunctions.LossFunction.MCXENT).build(), "in")
                    .setOutputs("0").build();
    ComputationGraph net = new ComputationGraph(conf);
    net.setListeners(new ScoreIterationListener(1));

    DataSetIterator irisIter = new IrisDataSetIterator(150, 150);
    EarlyStoppingModelSaver<ComputationGraph> saver = new InMemoryModelSaver<>();
    EarlyStoppingConfiguration<ComputationGraph> esConf =
                    new EarlyStoppingConfiguration.Builder<ComputationGraph>()
                                    .epochTerminationConditions(new MaxEpochsTerminationCondition(100),
                                                    new ScoreImprovementEpochTerminationCondition(5))
                                    .iterationTerminationConditions(new MaxTimeIterationTerminationCondition(1, TimeUnit.MINUTES),
                                                    new MaxScoreIterationTerminationCondition(50)) //Initial score is ~8
                                    .scoreCalculator(new DataSetLossCalculatorCG(irisIter, true)).modelSaver(saver).build();

    IEarlyStoppingTrainer trainer = new EarlyStoppingGraphTrainer(esConf, net, irisIter);
    EarlyStoppingResult result = trainer.fit();

    //Expect no score change due to 0 LR -> terminate after 6 total epochs
    assertEquals(6, result.getTotalEpochs());
    assertEquals(0, result.getBestModelEpoch());
    assertEquals(EarlyStoppingResult.TerminationReason.EpochTerminationCondition, result.getTerminationReason());
    String expDetails = new ScoreImprovementEpochTerminationCondition(5).toString();
    assertEquals(expDetails, result.getTerminationDetails());
}
Example 11
Source File: TestEarlyStoppingCompGraph.java From deeplearning4j with Apache License 2.0
@Test
public void testTimeTermination() {
    //test termination after max time
    Nd4j.getRandom().setSeed(12345);
    ComputationGraphConfiguration conf = new NeuralNetConfiguration.Builder().seed(12345)
                    .optimizationAlgo(OptimizationAlgorithm.STOCHASTIC_GRADIENT_DESCENT)
                    .updater(new Sgd(1e-6)).weightInit(WeightInit.XAVIER).graphBuilder()
                    .addInputs("in")
                    .addLayer("0", new OutputLayer.Builder().nIn(4).nOut(3)
                                    .activation(Activation.SOFTMAX)
                                    .lossFunction(LossFunctions.LossFunction.MCXENT).build(), "in")
                    .setOutputs("0").build();
    ComputationGraph net = new ComputationGraph(conf);
    net.setListeners(new ScoreIterationListener(1));

    DataSetIterator irisIter = new IrisDataSetIterator(150, 150);

    EarlyStoppingModelSaver<ComputationGraph> saver = new InMemoryModelSaver<>();
    EarlyStoppingConfiguration<ComputationGraph> esConf =
                    new EarlyStoppingConfiguration.Builder<ComputationGraph>()
                                    .epochTerminationConditions(new MaxEpochsTerminationCondition(10000))
                                    .iterationTerminationConditions(new MaxTimeIterationTerminationCondition(5, TimeUnit.SECONDS),
                                                    new MaxScoreIterationTerminationCondition(50)) //Initial score is ~8
                                    .scoreCalculator(new DataSetLossCalculator(irisIter, true))
                                    .modelSaver(saver).build();

    IEarlyStoppingTrainer trainer = new EarlyStoppingGraphTrainer(esConf, net, irisIter);
    long startTime = System.currentTimeMillis();
    EarlyStoppingResult result = trainer.fit();
    long endTime = System.currentTimeMillis();
    int durationSeconds = (int) (endTime - startTime) / 1000;

    assertTrue(durationSeconds >= 3);
    assertTrue(durationSeconds <= 20);

    assertEquals(EarlyStoppingResult.TerminationReason.IterationTerminationCondition, result.getTerminationReason());
    String expDetails = new MaxTimeIterationTerminationCondition(5, TimeUnit.SECONDS).toString();
    assertEquals(expDetails, result.getTerminationDetails());
}
Example 12
Source File: TestEarlyStoppingCompGraph.java From deeplearning4j with Apache License 2.0
@Test
public void testBadTuning() {
    //Test poor tuning (high LR): should terminate on MaxScoreIterationTerminationCondition
    Nd4j.getRandom().setSeed(12345);
    ComputationGraphConfiguration conf = new NeuralNetConfiguration.Builder().seed(12345)
                    .optimizationAlgo(OptimizationAlgorithm.STOCHASTIC_GRADIENT_DESCENT)
                    .updater(new Sgd(5.0)) //Intentionally huge LR
                    .weightInit(WeightInit.XAVIER).graphBuilder().addInputs("in")
                    .addLayer("0", new OutputLayer.Builder().nIn(4).nOut(3).activation(Activation.SOFTMAX)
                                    .lossFunction(LossFunctions.LossFunction.MCXENT).build(), "in")
                    .setOutputs("0").build();
    ComputationGraph net = new ComputationGraph(conf);
    net.setListeners(new ScoreIterationListener(1));

    DataSetIterator irisIter = new IrisDataSetIterator(150, 150);
    EarlyStoppingModelSaver<ComputationGraph> saver = new InMemoryModelSaver<>();
    EarlyStoppingConfiguration<ComputationGraph> esConf =
                    new EarlyStoppingConfiguration.Builder<ComputationGraph>()
                                    .epochTerminationConditions(new MaxEpochsTerminationCondition(5000))
                                    .iterationTerminationConditions(new MaxTimeIterationTerminationCondition(1, TimeUnit.MINUTES),
                                                    new MaxScoreIterationTerminationCondition(10)) //Initial score is ~2.5
                                    .scoreCalculator(new DataSetLossCalculatorCG(irisIter, true)).modelSaver(saver).build();

    IEarlyStoppingTrainer trainer = new EarlyStoppingGraphTrainer(esConf, net, irisIter);
    EarlyStoppingResult result = trainer.fit();

    assertTrue(result.getTotalEpochs() < 5);
    assertEquals(EarlyStoppingResult.TerminationReason.IterationTerminationCondition, result.getTerminationReason());
    String expDetails = new MaxScoreIterationTerminationCondition(10).toString();
    assertEquals(expDetails, result.getTerminationDetails());
    assertEquals(0, result.getBestModelEpoch());
    assertNotNull(result.getBestModel());
}
Example 13
Source File: Main.java From twse-captcha-solver-dl4j with MIT License
public static void main(String[] args) throws Exception {
    long startTime = System.currentTimeMillis();
    logger.info("start up time: " + startTime);

    File modelDir = new File(modelDirPath);

    // create dir
    boolean hasDir = modelDir.exists() || modelDir.mkdirs();
    logger.info(modelPath);

    // create model
    ComputationGraph model = createModel();

    // monitor the model score
    UIServer uiServer = UIServer.getInstance();
    StatsStorage statsStorage = new InMemoryStatsStorage();
    uiServer.attach(statsStorage);
    model.setListeners(new ScoreIterationListener(36), new StatsListener(statsStorage));

    // construct the iterator
    MultiDataSetIterator trainMulIterator = new CaptchaSetIterator(batchSize, "train");
    MultiDataSetIterator testMulIterator = new CaptchaSetIterator(batchSize, "test");
    MultiDataSetIterator validateMulIterator = new CaptchaSetIterator(batchSize, "validate");

    // fit
    for (int i = 0; i < epochs; i++) {
        System.out.println("Epoch=====================" + i);
        model.fit(trainMulIterator);
    }
    ModelSerializer.writeModel(model, modelPath, true);

    long endTime = System.currentTimeMillis();
    System.out.println("=============run time=====================" + (endTime - startTime));

    System.out.println("=====eval model=====test==================");
    modelPredict(model, testMulIterator);

    System.out.println("=====eval model=====validate==================");
    modelPredict(model, validateMulIterator);
}
Example 14
Source File: TrainCifar10Model.java From Java-Machine-Learning-for-Computer-Vision with MIT License
private void train() throws IOException {
    ZooModel zooModel = VGG16.builder().build();
    ComputationGraph vgg16 = (ComputationGraph) zooModel.initPretrained(PretrainedType.CIFAR10);
    log.info(vgg16.summary());

    IUpdater iUpdaterWithDefaultConfig = Updater.ADAM.getIUpdaterWithDefaultConfig();
    iUpdaterWithDefaultConfig.setLrAndSchedule(0.1, null);
    FineTuneConfiguration fineTuneConf = new FineTuneConfiguration.Builder()
            .seed(1234)
//            .weightInit(WeightInit.XAVIER)
            .optimizationAlgo(OptimizationAlgorithm.STOCHASTIC_GRADIENT_DESCENT)
            .activation(Activation.RELU)
            .updater(iUpdaterWithDefaultConfig)
            .cudnnAlgoMode(ConvolutionLayer.AlgoMode.NO_WORKSPACE)
            .miniBatch(true)
            .inferenceWorkspaceMode(WorkspaceMode.ENABLED)
            .trainingWorkspaceMode(WorkspaceMode.ENABLED)
            .pretrain(true)
            .backprop(true)
            .build();

    ComputationGraph cifar10 = new TransferLearning.GraphBuilder(vgg16)
            .setWorkspaceMode(WorkspaceMode.ENABLED)
            .fineTuneConfiguration(fineTuneConf)
            .setInputTypes(InputType.convolutionalFlat(ImageUtils.HEIGHT, ImageUtils.WIDTH, 3))
            .removeVertexAndConnections("dense_2_loss")
            .removeVertexAndConnections("dense_2")
            .removeVertexAndConnections("dense_1")
            .removeVertexAndConnections("dropout_1")
            .removeVertexAndConnections("embeddings")
            .removeVertexAndConnections("flatten_1")
            .addLayer("dense_1", new DenseLayer.Builder()
                    .nIn(4096)
                    .nOut(EMBEDDINGS)
                    .activation(Activation.RELU).build(), "block3_pool")
            .addVertex("embeddings", new L2NormalizeVertex(new int[]{}, 1e-12), "dense_1")
            .addLayer("lossLayer", new CenterLossOutputLayer.Builder()
                            .lossFunction(LossFunctions.LossFunction.SQUARED_LOSS)
                            .activation(Activation.SOFTMAX).nIn(EMBEDDINGS).nOut(NUM_POSSIBLE_LABELS)
                            .lambda(LAMBDA).alpha(0.9)
                            .gradientNormalization(GradientNormalization.RenormalizeL2PerLayer).build(),
                    "embeddings")
            .setOutputs("lossLayer")
            .build();

    log.info(cifar10.summary());

    File rootDir = new File("CarTracking/train_from_video_" + NUM_POSSIBLE_LABELS);
    DataSetIterator dataSetIterator = ImageUtils.createDataSetIterator(rootDir, NUM_POSSIBLE_LABELS, BATCH_SIZE);
    DataSetIterator testIterator = ImageUtils.createDataSetIterator(rootDir, NUM_POSSIBLE_LABELS, BATCH_SIZE);
    cifar10.setListeners(new ScoreIterationListener(2));

    int iEpoch = I_EPOCH;
    while (iEpoch < EPOCH_TRAINING) {
        while (dataSetIterator.hasNext()) {
            DataSet trainMiniBatchData = null;
            try {
                trainMiniBatchData = dataSetIterator.next();
            } catch (Exception e) {
                e.printStackTrace();
            }
            cifar10.fit(trainMiniBatchData);
        }
        iEpoch++;

        String modelName = PREFIX + NUM_POSSIBLE_LABELS + "_epoch_data_e" + EMBEDDINGS + "_b" + BATCH_SIZE + "_" + iEpoch + ".zip";
        saveProgress(cifar10, iEpoch, modelName);
        testResults(cifar10, testIterator, iEpoch, modelName);
        dataSetIterator.reset();
        log.info("iEpoch = " + iEpoch);
    }
}
Example 15
Source File: TestListeners.java From deeplearning4j with Apache License 2.0
@Test
public void testListenerCalls(){
    MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder()
            .list()
            .layer(new OutputLayer.Builder().nIn(4).nOut(3).activation(Activation.SOFTMAX)
                    .lossFunction(LossFunctions.LossFunction.MCXENT).build())
            .build();

    MultiLayerNetwork net = new MultiLayerNetwork(conf);
    net.init();

    TestListener tl = new TestListener();
    net.setListeners(tl);

    DataSetIterator irisIter = new IrisDataSetIterator(50, 150);

    net.fit(irisIter, 2);

    List<Triple<Call,Integer,Integer>> exp = new ArrayList<>();
    exp.add(new Triple<>(Call.EPOCH_START, 0, 0));
    exp.add(new Triple<>(Call.ON_FWD, 0, 0));
    exp.add(new Triple<>(Call.ON_BWD, 0, 0));
    exp.add(new Triple<>(Call.ON_GRAD, 0, 0));
    exp.add(new Triple<>(Call.ITER_DONE, 0, 0));
    exp.add(new Triple<>(Call.ON_FWD, 1, 0));
    exp.add(new Triple<>(Call.ON_BWD, 1, 0));
    exp.add(new Triple<>(Call.ON_GRAD, 1, 0));
    exp.add(new Triple<>(Call.ITER_DONE, 1, 0));
    exp.add(new Triple<>(Call.ON_FWD, 2, 0));
    exp.add(new Triple<>(Call.ON_BWD, 2, 0));
    exp.add(new Triple<>(Call.ON_GRAD, 2, 0));
    exp.add(new Triple<>(Call.ITER_DONE, 2, 0));
    exp.add(new Triple<>(Call.EPOCH_END, 3, 0));    //Post updating iter count, pre update epoch count
    exp.add(new Triple<>(Call.EPOCH_START, 3, 1));
    exp.add(new Triple<>(Call.ON_FWD, 3, 1));
    exp.add(new Triple<>(Call.ON_BWD, 3, 1));
    exp.add(new Triple<>(Call.ON_GRAD, 3, 1));
    exp.add(new Triple<>(Call.ITER_DONE, 3, 1));
    exp.add(new Triple<>(Call.ON_FWD, 4, 1));
    exp.add(new Triple<>(Call.ON_BWD, 4, 1));
    exp.add(new Triple<>(Call.ON_GRAD, 4, 1));
    exp.add(new Triple<>(Call.ITER_DONE, 4, 1));
    exp.add(new Triple<>(Call.ON_FWD, 5, 1));
    exp.add(new Triple<>(Call.ON_BWD, 5, 1));
    exp.add(new Triple<>(Call.ON_GRAD, 5, 1));
    exp.add(new Triple<>(Call.ITER_DONE, 5, 1));
    exp.add(new Triple<>(Call.EPOCH_END, 6, 1));

    assertEquals(exp, tl.getCalls());

    tl = new TestListener();

    ComputationGraph cg = net.toComputationGraph();
    cg.setListeners(tl);

    cg.fit(irisIter, 2);

    assertEquals(exp, tl.getCalls());
}
Example 16
Source File: TestVertxUI.java From deeplearning4j with Apache License 2.0
@Test
public void testAutoAttach() throws Exception {
    ComputationGraphConfiguration conf = new NeuralNetConfiguration.Builder().graphBuilder().addInputs("in")
                    .addLayer("L0", new DenseLayer.Builder().activation(Activation.TANH).nIn(4).nOut(4).build(), "in")
                    .addLayer("L1", new OutputLayer.Builder().lossFunction(LossFunctions.LossFunction.MCXENT)
                                    .activation(Activation.SOFTMAX).nIn(4).nOut(3).build(), "L0")
                    .setOutputs("L1").build();

    ComputationGraph net = new ComputationGraph(conf);
    net.init();

    StatsStorage ss1 = new InMemoryStatsStorage();

    net.setListeners(new StatsListener(ss1, 1, "ss1"));

    DataSetIterator iter = new IrisDataSetIterator(150, 150);
    for (int i = 0; i < 5; i++) {
        net.fit(iter);
    }

    StatsStorage ss2 = new InMemoryStatsStorage();
    net.setListeners(new StatsListener(ss2, 1, "ss2"));

    for (int i = 0; i < 4; i++) {
        net.fit(iter);
    }

    UIServer ui = UIServer.getInstance(true, null);
    try {
        ((VertxUIServer) ui).autoAttachStatsStorageBySessionId(new Function<String, StatsStorage>() {
            @Override
            public StatsStorage apply(String s) {
                if ("ss1".equals(s)) {
                    return ss1;
                } else if ("ss2".equals(s)) {
                    return ss2;
                }
                return null;
            }
        });

        String json1 = IOUtils.toString(new URL("http://localhost:9000/train/ss1/overview/data"),
                        StandardCharsets.UTF_8);
        String json2 = IOUtils.toString(new URL("http://localhost:9000/train/ss2/overview/data"),
                        StandardCharsets.UTF_8);

        assertNotEquals(json1, json2);

        Map<String, Object> m1 = JsonMappers.getMapper().readValue(json1, Map.class);
        Map<String, Object> m2 = JsonMappers.getMapper().readValue(json2, Map.class);

        List<Object> s1 = (List<Object>) m1.get("scores");
        List<Object> s2 = (List<Object>) m2.get("scores");

        assertEquals(5, s1.size());
        assertEquals(4, s2.size());
    } finally {
        ui.stop();
    }
}
Example 17
Source File: ActorCriticFactoryCompGraphStdDense.java From deeplearning4j with Apache License 2.0
public ActorCriticCompGraph buildActorCritic(int[] numInputs, int numOutputs) {
    int nIn = 1;
    for (int i : numInputs) {
        nIn *= i;
    }
    ComputationGraphConfiguration.GraphBuilder confB =
                    new NeuralNetConfiguration.Builder().seed(Constants.NEURAL_NET_SEED)
                                    .optimizationAlgo(OptimizationAlgorithm.STOCHASTIC_GRADIENT_DESCENT)
                                    .updater(conf.getUpdater() != null ? conf.getUpdater() : new Adam())
                                    .weightInit(WeightInit.XAVIER)
                                    .l2(conf.getL2()).graphBuilder()
                                    .setInputTypes(conf.isUseLSTM() ? InputType.recurrent(nIn)
                                                    : InputType.feedForward(nIn)).addInputs("input")
                                    .addLayer("0", new DenseLayer.Builder().nIn(nIn)
                                                    .nOut(conf.getNumHiddenNodes()).activation(Activation.RELU).build(),
                                                    "input");

    for (int i = 1; i < conf.getNumLayers(); i++) {
        confB.addLayer(i + "", new DenseLayer.Builder().nIn(conf.getNumHiddenNodes()).nOut(conf.getNumHiddenNodes())
                        .activation(Activation.RELU).build(), (i - 1) + "");
    }

    if (conf.isUseLSTM()) {
        confB.addLayer(getConf().getNumLayers() + "", new LSTM.Builder().activation(Activation.TANH)
                        .nOut(conf.getNumHiddenNodes()).build(), (getConf().getNumLayers() - 1) + "");

        confB.addLayer("value", new RnnOutputLayer.Builder(LossFunctions.LossFunction.MSE).activation(Activation.IDENTITY)
                        .nOut(1).build(), getConf().getNumLayers() + "");

        confB.addLayer("softmax", new RnnOutputLayer.Builder(new ActorCriticLoss()).activation(Activation.SOFTMAX)
                        .nOut(numOutputs).build(), getConf().getNumLayers() + "");
    } else {
        confB.addLayer("value", new OutputLayer.Builder(LossFunctions.LossFunction.MSE).activation(Activation.IDENTITY)
                        .nOut(1).build(), (getConf().getNumLayers() - 1) + "");

        confB.addLayer("softmax", new OutputLayer.Builder(new ActorCriticLoss()).activation(Activation.SOFTMAX)
                        .nOut(numOutputs).build(), (getConf().getNumLayers() - 1) + "");
    }

    confB.setOutputs("value", "softmax");

    ComputationGraphConfiguration cgconf = confB.build();
    ComputationGraph model = new ComputationGraph(cgconf);
    model.init();
    if (conf.getListeners() != null) {
        model.setListeners(conf.getListeners());
    } else {
        model.setListeners(new ScoreIterationListener(Constants.NEURAL_NET_ITERATION_LISTENER));
    }
    return new ActorCriticCompGraph(model);
}
Example 18
Source File: ActorCriticFactoryCompGraphStdConv.java From deeplearning4j with Apache License 2.0
public ActorCriticCompGraph buildActorCritic(int[] shapeInputs, int numOutputs) {
    if (shapeInputs.length == 1)
        throw new AssertionError("Impossible to apply convolutional layer on a shape == 1");

    int h = (((shapeInputs[1] - 8) / 4 + 1) - 4) / 2 + 1;
    int w = (((shapeInputs[2] - 8) / 4 + 1) - 4) / 2 + 1;

    ComputationGraphConfiguration.GraphBuilder confB =
                    new NeuralNetConfiguration.Builder().seed(Constants.NEURAL_NET_SEED)
                                    .optimizationAlgo(OptimizationAlgorithm.STOCHASTIC_GRADIENT_DESCENT)
                                    .updater(conf.getUpdater() != null ? conf.getUpdater() : new Adam())
                                    .weightInit(WeightInit.XAVIER)
                                    .l2(conf.getL2()).graphBuilder()
                                    .addInputs("input").addLayer("0",
                                                    new ConvolutionLayer.Builder(8, 8).nIn(shapeInputs[0]).nOut(16)
                                                                    .stride(4, 4).activation(Activation.RELU).build(),
                                                    "input");

    confB.addLayer("1", new ConvolutionLayer.Builder(4, 4).nIn(16).nOut(32).stride(2, 2).activation(Activation.RELU).build(), "0");

    confB.addLayer("2", new DenseLayer.Builder().nIn(w * h * 32).nOut(256).activation(Activation.RELU).build(), "1");

    if (conf.isUseLSTM()) {
        confB.addLayer("3", new LSTM.Builder().nIn(256).nOut(256).activation(Activation.TANH).build(), "2");

        confB.addLayer("value", new RnnOutputLayer.Builder(LossFunctions.LossFunction.MSE).activation(Activation.IDENTITY)
                        .nIn(256).nOut(1).build(), "3");

        confB.addLayer("softmax", new RnnOutputLayer.Builder(new ActorCriticLoss()).activation(Activation.SOFTMAX)
                        .nIn(256).nOut(numOutputs).build(), "3");
    } else {
        confB.addLayer("value", new OutputLayer.Builder(LossFunctions.LossFunction.MSE).activation(Activation.IDENTITY)
                        .nIn(256).nOut(1).build(), "2");

        confB.addLayer("softmax", new OutputLayer.Builder(new ActorCriticLoss()).activation(Activation.SOFTMAX)
                        .nIn(256).nOut(numOutputs).build(), "2");
    }

    confB.setOutputs("value", "softmax");

    if (conf.isUseLSTM()) {
        confB.inputPreProcessor("0", new RnnToCnnPreProcessor(shapeInputs[1], shapeInputs[2], shapeInputs[0]));
        confB.inputPreProcessor("2", new CnnToFeedForwardPreProcessor(h, w, 32));
        confB.inputPreProcessor("3", new FeedForwardToRnnPreProcessor());
    } else {
        confB.setInputTypes(InputType.convolutional(shapeInputs[1], shapeInputs[2], shapeInputs[0]));
    }

    ComputationGraphConfiguration cgconf = confB.build();
    ComputationGraph model = new ComputationGraph(cgconf);
    model.init();
    if (conf.getListeners() != null) {
        model.setListeners(conf.getListeners());
    } else {
        model.setListeners(new ScoreIterationListener(Constants.NEURAL_NET_ITERATION_LISTENER));
    }

    return new ActorCriticCompGraph(model);
}
Example 19
Source File: TestEarlyStoppingSparkCompGraph.java From deeplearning4j with Apache License 2.0
@Test
public void testTimeTermination() {
    //test termination after max time
    Nd4j.getRandom().setSeed(12345);
    ComputationGraphConfiguration conf = new NeuralNetConfiguration.Builder().seed(12345)
                    .optimizationAlgo(OptimizationAlgorithm.STOCHASTIC_GRADIENT_DESCENT)
                    .updater(new Sgd(1e-6)).weightInit(WeightInit.XAVIER).graphBuilder()
                    .addInputs("in")
                    .addLayer("0", new OutputLayer.Builder().nIn(4).nOut(3)
                                    .lossFunction(LossFunctions.LossFunction.MCXENT).build(), "in")
                    .setOutputs("0").build();
    ComputationGraph net = new ComputationGraph(conf);
    net.setListeners(new ScoreIterationListener(5));

    JavaRDD<DataSet> irisData = getIris();

    EarlyStoppingModelSaver<ComputationGraph> saver = new InMemoryModelSaver<>();
    EarlyStoppingConfiguration<ComputationGraph> esConf =
                    new EarlyStoppingConfiguration.Builder<ComputationGraph>()
                                    .epochTerminationConditions(new MaxEpochsTerminationCondition(10000))
                                    .iterationTerminationConditions(new MaxTimeIterationTerminationCondition(3, TimeUnit.SECONDS),
                                                    new MaxScoreIterationTerminationCondition(7.5)) //Initial score is ~2.5
                                    .scoreCalculator(new SparkLossCalculatorComputationGraph(
                                                    irisData.map(new DataSetToMultiDataSetFn()), true, sc.sc()))
                                    .modelSaver(saver).build();

    TrainingMaster tm = new ParameterAveragingTrainingMaster(true, numExecutors(), 1, 10, 1, 0);

    IEarlyStoppingTrainer<ComputationGraph> trainer = new SparkEarlyStoppingGraphTrainer(getContext().sc(), tm,
                    esConf, net, irisData.map(new DataSetToMultiDataSetFn()));

    long startTime = System.currentTimeMillis();
    EarlyStoppingResult result = trainer.fit();
    long endTime = System.currentTimeMillis();
    int durationSeconds = (int) (endTime - startTime) / 1000;

    assertTrue(durationSeconds >= 3);
    assertTrue(durationSeconds <= 20);

    assertEquals(EarlyStoppingResult.TerminationReason.IterationTerminationCondition, result.getTerminationReason());
    String expDetails = new MaxTimeIterationTerminationCondition(3, TimeUnit.SECONDS).toString();
    assertEquals(expDetails, result.getTerminationDetails());
}
Example 20
Source File: TransferLearningVGG16.java From Java-Machine-Learning-for-Computer-Vision with MIT License
public void train() throws IOException {
    ComputationGraph preTrainedNet = loadVGG16PreTrainedWeights();
    log.info("VGG 16 Architecture");
    log.info(preTrainedNet.summary());

    log.info("Start Downloading NeuralNetworkTrainingData...");
    downloadAndUnzipDataForTheFirstTime();
    log.info("NeuralNetworkTrainingData Downloaded and unzipped");

    neuralNetworkTrainingData = new DataStorage() {
    }.loadData();

    FineTuneConfiguration fineTuneConf = new FineTuneConfiguration.Builder()
            .learningRate(LEARNING_RATE)
            .optimizationAlgo(OptimizationAlgorithm.STOCHASTIC_GRADIENT_DESCENT)
            .updater(Updater.NESTEROVS)
            .seed(1234)
            .build();

    ComputationGraph vgg16Transfer = new TransferLearning.GraphBuilder(preTrainedNet)
            .fineTuneConfiguration(fineTuneConf)
            .setFeatureExtractor(FREEZE_UNTIL_LAYER)
            .removeVertexKeepConnections("predictions")
            .addLayer("predictions",
                    new OutputLayer.Builder(LossFunctions.LossFunction.NEGATIVELOGLIKELIHOOD)
                            .nIn(4096)
                            .nOut(NUM_POSSIBLE_LABELS)
                            .weightInit(WeightInit.XAVIER)
                            .activation(Activation.SOFTMAX)
                            .build(), FREEZE_UNTIL_LAYER)
            .build();
    vgg16Transfer.setListeners(new ScoreIterationListener(5));
    log.info("Modified VGG 16 Architecture for transfer learning");
    log.info(vgg16Transfer.summary());

    int iEpoch = 0;
    int iIteration = 0;
    while (iEpoch < EPOCH) {
        while (neuralNetworkTrainingData.getTrainIterator().hasNext()) {
            DataSet trainMiniBatchData = neuralNetworkTrainingData.getTrainIterator().next();
            vgg16Transfer.fit(trainMiniBatchData);
            saveProgressEveryConfiguredInterval(vgg16Transfer, iEpoch, iIteration);
            iIteration++;
        }
        neuralNetworkTrainingData.getTrainIterator().reset();
        iEpoch++;

        evalOn(vgg16Transfer, neuralNetworkTrainingData.getTestIterator(), iEpoch);
    }
}