Java Code Examples for org.deeplearning4j.eval.Evaluation#eval()
The following examples show how to use
org.deeplearning4j.eval.Evaluation#eval().
You can vote up the ones you like or vote down the ones you don't like,
and go to the original project or source file by following the links above each example. You may check out the related API usage on the sidebar.
Example 1
Source File: TrainUtil.java From FancyBing with GNU General Public License v3.0 | 6 votes |
public static double evaluate(Model model, int outputNum, MultiDataSetIterator testData, int topN, int batchSize) { log.info("Evaluate model...."); Evaluation clsEval = new Evaluation(createLabels(outputNum), topN); RegressionEvaluation valueRegEval1 = new RegressionEvaluation(1); int count = 0; long begin = 0; long consume = 0; while(testData.hasNext()){ MultiDataSet ds = testData.next(); begin = System.nanoTime(); INDArray[] output = ((ComputationGraph) model).output(false, ds.getFeatures()); consume += System.nanoTime() - begin; clsEval.eval(ds.getLabels(0), output[0]); valueRegEval1.eval(ds.getLabels(1), output[1]); count++; } String stats = clsEval.stats(); int pos = stats.indexOf("==="); stats = "\n" + stats.substring(pos); log.info(stats); log.info(valueRegEval1.stats()); testData.reset(); log.info("Evaluate time: " + consume + " count: " + (count * batchSize) + " average: " + ((float) consume/(count*batchSize)/1000)); return clsEval.accuracy(); }
Example 2
Source File: IrisModel.java From FederatedAndroidTrainer with MIT License | 5 votes |
@Override public String evaluate(FederatedDataSet federatedDataSet) { //evaluate the model on the test set DataSet testData = (DataSet) federatedDataSet.getNativeDataSet(); double score = model.score(testData); Evaluation eval = new Evaluation(numClasses); INDArray output = model.output(testData.getFeatureMatrix()); eval.eval(testData.getLabels(), output); return "Score: " + score; }
Example 3
Source File: ParallelInferenceTest.java From deeplearning4j with Apache License 2.0 | 5 votes |
protected void evalClassifcationSingleThread(@NonNull ParallelInference inf, @NonNull DataSetIterator iterator) { DataSet ds = iterator.next(); log.info("NumColumns: {}", ds.getLabels().columns()); iterator.reset(); Evaluation eval = new Evaluation(ds.getLabels().columns()); int count = 0; while (iterator.hasNext() && (count++ < 100)) { ds = iterator.next(); INDArray output = inf.output(ds.getFeatures()); eval.eval(ds.getLabels(), output); } log.info(eval.stats()); }
Example 4
Source File: DataSetIteratorTest.java From deeplearning4j with Apache License 2.0 | 5 votes |
@Test public void testLfwModel() throws Exception { final int numRows = 28; final int numColumns = 28; int numChannels = 3; int outputNum = LFWLoader.NUM_LABELS; int numSamples = LFWLoader.NUM_IMAGES; int batchSize = 2; int seed = 123; int listenerFreq = 1; LFWDataSetIterator lfw = new LFWDataSetIterator(batchSize, numSamples, new int[] {numRows, numColumns, numChannels}, outputNum, false, true, 1.0, new Random(seed)); MultiLayerConfiguration.Builder builder = new NeuralNetConfiguration.Builder().seed(seed) .gradientNormalization(GradientNormalization.RenormalizeL2PerLayer) .optimizationAlgo(OptimizationAlgorithm.STOCHASTIC_GRADIENT_DESCENT).list() .layer(0, new ConvolutionLayer.Builder(5, 5).nIn(numChannels).nOut(6) .weightInit(WeightInit.XAVIER).activation(Activation.RELU).build()) .layer(1, new SubsamplingLayer.Builder(SubsamplingLayer.PoolingType.MAX, new int[] {2, 2}) .stride(1, 1).build()) .layer(2, new OutputLayer.Builder(LossFunctions.LossFunction.NEGATIVELOGLIKELIHOOD) .nOut(outputNum).weightInit(WeightInit.XAVIER).activation(Activation.SOFTMAX) .build()) .setInputType(InputType.convolutionalFlat(numRows, numColumns, numChannels)) ; MultiLayerNetwork model = new MultiLayerNetwork(builder.build()); model.init(); model.setListeners(new ScoreIterationListener(listenerFreq)); model.fit(lfw.next()); DataSet dataTest = lfw.next(); INDArray output = model.output(dataTest.getFeatures()); Evaluation eval = new Evaluation(outputNum); eval.eval(dataTest.getLabels(), output); // System.out.println(eval.stats()); }
Example 5
Source File: MultiLayerTest.java From deeplearning4j with Apache License 2.0 | 5 votes |
@Test public void testBackProp() { Nd4j.getRandom().setSeed(123); MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder() .optimizationAlgo(OptimizationAlgorithm.LINE_GRADIENT_DESCENT).seed(123).list() .layer(0, new DenseLayer.Builder().nIn(4).nOut(3).weightInit(WeightInit.XAVIER) .activation(Activation.TANH).build()) .layer(1, new DenseLayer.Builder().nIn(3).nOut(2).weightInit(WeightInit.XAVIER) .activation(Activation.TANH).build()) .layer(2, new org.deeplearning4j.nn.conf.layers.OutputLayer.Builder( LossFunctions.LossFunction.MCXENT).weightInit(WeightInit.XAVIER) .activation(Activation.SOFTMAX).nIn(2).nOut(3).build()) .build(); MultiLayerNetwork network = new MultiLayerNetwork(conf); network.init(); network.setListeners(new ScoreIterationListener(1)); DataSetIterator iter = new IrisDataSetIterator(150, 150); DataSet next = iter.next(); next.normalizeZeroMeanZeroUnitVariance(); SplitTestAndTrain trainTest = next.splitTestAndTrain(110); network.setInput(trainTest.getTrain().getFeatures()); network.setLabels(trainTest.getTrain().getLabels()); network.init(); for( int i=0; i<5; i++ ) { network.fit(trainTest.getTrain()); } DataSet test = trainTest.getTest(); Evaluation eval = new Evaluation(); INDArray output = network.output(test.getFeatures()); eval.eval(test.getLabels(), output); log.info("Score " + eval.stats()); }
Example 6
Source File: ConvolutionLayerTest.java From deeplearning4j with Apache License 2.0 | 5 votes |
@Test public void testCNNMLNBackprop() throws Exception { int numSamples = 10; int batchSize = 10; DataSetIterator mnistIter = new MnistDataSetIterator(batchSize, numSamples, true); MultiLayerNetwork model = getCNNMLNConfig(true, false); model.fit(mnistIter); MultiLayerNetwork model2 = getCNNMLNConfig(true, false); model2.fit(mnistIter); mnistIter.reset(); DataSet test = mnistIter.next(); Evaluation eval = new Evaluation(); INDArray output = model.output(test.getFeatures()); eval.eval(test.getLabels(), output); double f1Score = eval.f1(); Evaluation eval2 = new Evaluation(); INDArray output2 = model2.output(test.getFeatures()); eval2.eval(test.getLabels(), output2); double f1Score2 = eval2.f1(); assertEquals(f1Score, f1Score2, 1e-4); }
Example 7
Source File: ConvolutionLayerTest.java From deeplearning4j with Apache License 2.0 | 5 votes |
@Test public void testCNNMLNPretrain() throws Exception { // Note CNN does not do pretrain int numSamples = 10; int batchSize = 10; DataSetIterator mnistIter = new MnistDataSetIterator(batchSize, numSamples, true); MultiLayerNetwork model = getCNNMLNConfig(false, true); model.fit(mnistIter); mnistIter.reset(); MultiLayerNetwork model2 = getCNNMLNConfig(false, true); model2.fit(mnistIter); mnistIter.reset(); DataSet test = mnistIter.next(); Evaluation eval = new Evaluation(); INDArray output = model.output(test.getFeatures()); eval.eval(test.getLabels(), output); double f1Score = eval.f1(); Evaluation eval2 = new Evaluation(); INDArray output2 = model2.output(test.getFeatures()); eval2.eval(test.getLabels(), output2); double f1Score2 = eval2.f1(); assertEquals(f1Score, f1Score2, 1e-4); }
Example 8
Source File: MNISTModel.java From FederatedAndroidTrainer with MIT License | 5 votes |
@Override public String evaluate(FederatedDataSet federatedDataSet) { DataSet testData = (DataSet) federatedDataSet.getNativeDataSet(); List<DataSet> listDs = testData.asList(); DataSetIterator iterator = new ListDataSetIterator(listDs, BATCH_SIZE); Evaluation eval = new Evaluation(OUTPUT_NUM); //create an evaluation object with 10 possible classes while (iterator.hasNext()) { DataSet next = iterator.next(); INDArray output = model.output(next.getFeatureMatrix()); //get the networks prediction eval.eval(next.getLabels(), output); //check the prediction against the true class } return eval.stats(); }
Example 9
Source File: ModelUtils.java From gluon-samples with BSD 3-Clause "New" or "Revised" License | 4 votes |
public void evaluateModel(MultiLayerNetwork model, boolean invertColors) throws IOException { LOGGER.info("******EVALUATE MODEL******"); ParentPathLabelGenerator labelMaker = new ParentPathLabelGenerator(); ImageRecordReader recordReader = new ImageRecordReader(height,width,channels,labelMaker); // recordReader.setListeners(new LogRecordListener()); // Initialize the record reader // add a listener, to extract the name File testData = new File(DATA_PATH + "/mnist_png/testing"); FileSplit test = new FileSplit(testData,NativeImageLoader.ALLOWED_FORMATS,randNumGen); // The model trained on the training dataset split // now that it has trained we evaluate against the // test data of images the network has not seen recordReader.initialize(test); DataNormalization scaler = new ImagePreProcessingScaler(invertColors ? 1 : 0, invertColors ? 0 : 1); DataSetIterator testIter = new RecordReaderDataSetIterator(recordReader,batchSize,1,outputNum); scaler.fit(testIter); testIter.setPreProcessor(scaler); /* log the order of the labels for later use In previous versions the label order was consistent, but random In current verions label order is lexicographic preserving the RecordReader Labels order is no longer needed left in for demonstration purposes */ LOGGER.info(recordReader.getLabels().toString()); // Create Eval object with 10 possible classes Evaluation eval = new Evaluation(outputNum); // Evaluate the network while (testIter.hasNext()) { DataSet next = testIter.next(); INDArray output = model.output(next.getFeatureMatrix()); // Compare the Feature Matrix from the model // with the labels from the RecordReader eval.eval(next.getLabels(), output); } LOGGER.info(eval.stats()); }
Example 10
Source File: NeuralNetworks.java From Machine-Learning-in-Java with MIT License | 4 votes |
public static void main(String[] args) throws Exception { final int numRows = 28; final int numColumns = 28; int outputNum = 10; int numSamples = 60000; int batchSize = 100; int iterations = 10; int seed = 123; int listenerFreq = batchSize / 5; log.info("Load data...."); DataSetIterator iter = new MnistDataSetIterator(batchSize, numSamples, true); log.info("Build model...."); MultiLayerNetwork model = softMaxRegression(seed, iterations, numRows, numColumns, outputNum); // // MultiLayerNetwork model = deepBeliefNetwork(seed, iterations, // // numRows, numColumns, outputNum); // MultiLayerNetwork model = deepConvNetwork(seed, iterations, numRows, // numColumns, outputNum); model.init(); model.setListeners(Collections .singletonList((IterationListener) new ScoreIterationListener( listenerFreq))); log.info("Train model...."); model.fit(iter); // achieves end to end pre-training log.info("Evaluate model...."); Evaluation eval = new Evaluation(outputNum); DataSetIterator testIter = new MnistDataSetIterator(100, 10000); while (testIter.hasNext()) { DataSet testMnist = testIter.next(); INDArray predict2 = model.output(testMnist.getFeatureMatrix()); eval.eval(testMnist.getLabels(), predict2); } log.info(eval.stats()); log.info("****************Example finished********************"); }
Example 11
Source File: MLPMnistSingleLayerExample.java From dl4j-tutorials with MIT License | 4 votes |
/**
 * Trains a single-hidden-layer MLP (784-1000-10) on MNIST for 15 epochs,
 * evaluates it on the test set, saves the model to
 * model/SingleLayerModel.zip, and logs evaluation statistics.
 */
public static void main(String[] args) throws Exception {
    //number of rows and columns in the input pictures
    final int numRows = 28;
    final int numColumns = 28;
    int outputNum = 10; // number of output classes
    int batchSize = 128; // batch size for each epoch
    int rngSeed = 123; // random number seed for reproducibility
    int numEpochs = 15; // number of epochs to perform

    //Get the DataSetIterators:
    DataSetIterator mnistTrain = new MnistDataSetIterator(batchSize, true, rngSeed);
    DataSetIterator mnistTest = new MnistDataSetIterator(batchSize, false, rngSeed);

    log.info("Build model....");
    MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder()
            .seed(rngSeed) //include a random seed for reproducibility
            // use stochastic gradient descent as an optimization algorithm
            .updater(new Nesterovs(0.006, 0.9))
            .l2(1e-4)
            .list()
            .layer(0, new DenseLayer.Builder() //create the first, input layer with xavier initialization
                    // batchSize, features
                    .nIn(numRows * numColumns)
                    .nOut(1000)
                    .activation(Activation.RELU)
                    .weightInit(WeightInit.XAVIER)
                    .build())
            .layer(1, new OutputLayer.Builder(LossFunction.NEGATIVELOGLIKELIHOOD) //create hidden layer
                    .nIn(1000)
                    .nOut(outputNum)
                    .activation(Activation.SOFTMAX)
                    .weightInit(WeightInit.XAVIER)
                    .build())
            .pretrain(false).backprop(true) //use backpropagation to adjust weights
            .build();

    MultiLayerNetwork model = new MultiLayerNetwork(conf);
    model.init();
    //print the score with every 1 iteration
    model.setListeners(new ScoreIterationListener(1));

    log.info("Train model....");
    for (int i = 0; i < numEpochs; i++) {
        model.fit(mnistTrain);
    }

    log.info("Evaluate model....");
    Evaluation eval = new Evaluation(outputNum); //create an evaluation object with 10 possible classes
    while (mnistTest.hasNext()) {
        DataSet next = mnistTest.next();
        INDArray output = model.output(next.getFeatures(), false); //get the networks prediction
        eval.eval(next.getLabels(), output); //check the prediction against the true class
    }

    try {
        ModelSerializer.writeModel(model, new File("model/SingleLayerModel.zip"), false);
    } catch (IOException e) {
        // NOTE(review): a failed save is only printed, not rethrown — the
        // example still logs evaluation results afterwards.
        e.printStackTrace();
    }
    log.info(eval.stats());
    log.info("****************Example finished********************");
}
Example 12
Source File: LearnIrisBackprop.java From aifh with Apache License 2.0 | 4 votes |
/** * The main method. * @param args Not used. */ public static void main(String[] args) { try { int seed = 43; double learningRate = 0.1; int splitTrainNum = (int) (150 * .75); int numInputs = 4; int numOutputs = 3; int numHiddenNodes = 50; // Setup training data. final InputStream istream = LearnIrisBackprop.class.getResourceAsStream("/iris.csv"); if( istream==null ) { System.out.println("Cannot access data set, make sure the resources are available."); System.exit(1); } final NormalizeDataSet ds = NormalizeDataSet.load(istream); final CategoryMap species = ds.encodeOneOfN(4); // species is column 4 istream.close(); DataSet next = ds.extractSupervised(0, 4, 4, 3); next.shuffle(); // Training and validation data split SplitTestAndTrain testAndTrain = next.splitTestAndTrain(splitTrainNum, new Random(seed)); DataSet trainSet = testAndTrain.getTrain(); DataSet validationSet = testAndTrain.getTest(); DataSetIterator trainSetIterator = new ListDataSetIterator(trainSet.asList(), trainSet.numExamples()); DataSetIterator validationSetIterator = new ListDataSetIterator(validationSet.asList(), validationSet.numExamples()); // Create neural network. MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder() .seed(seed) .iterations(1) .optimizationAlgo(OptimizationAlgorithm.STOCHASTIC_GRADIENT_DESCENT) .learningRate(learningRate) .updater(Updater.NESTEROVS).momentum(0.9) .list(2) .layer(0, new DenseLayer.Builder().nIn(numInputs).nOut(numHiddenNodes) .weightInit(WeightInit.XAVIER) .activation("relu") .build()) .layer(1, new OutputLayer.Builder(LossFunction.NEGATIVELOGLIKELIHOOD) .weightInit(WeightInit.XAVIER) .activation("softmax") .nIn(numHiddenNodes).nOut(numOutputs).build()) .pretrain(false).backprop(true).build(); MultiLayerNetwork model = new MultiLayerNetwork(conf); model.init(); model.setListeners(new ScoreIterationListener(1)); // Define when we want to stop training. 
EarlyStoppingModelSaver saver = new InMemoryModelSaver(); EarlyStoppingConfiguration esConf = new EarlyStoppingConfiguration.Builder() .epochTerminationConditions(new MaxEpochsTerminationCondition(500)) //Max of 50 epochs .epochTerminationConditions(new ScoreImprovementEpochTerminationCondition(25)) .evaluateEveryNEpochs(1) .scoreCalculator(new DataSetLossCalculator(validationSetIterator, true)) //Calculate test set score .modelSaver(saver) .build(); EarlyStoppingTrainer trainer = new EarlyStoppingTrainer(esConf, conf, trainSetIterator); // Train and display result. EarlyStoppingResult result = trainer.fit(); System.out.println("Termination reason: " + result.getTerminationReason()); System.out.println("Termination details: " + result.getTerminationDetails()); System.out.println("Total epochs: " + result.getTotalEpochs()); System.out.println("Best epoch number: " + result.getBestModelEpoch()); System.out.println("Score at best epoch: " + result.getBestModelScore()); model = saver.getBestModel(); // Evaluate Evaluation eval = new Evaluation(numOutputs); validationSetIterator.reset(); for (int i = 0; i < validationSet.numExamples(); i++) { DataSet t = validationSet.get(i); INDArray features = t.getFeatureMatrix(); INDArray labels = t.getLabels(); INDArray predicted = model.output(features, false); System.out.println(features + ":Prediction("+findSpecies(labels,species) +"):Actual("+findSpecies(predicted,species)+")" + predicted ); eval.eval(labels, predicted); } //Print the evaluation statistics System.out.println(eval.stats()); } catch(Exception ex) { ex.printStackTrace(); } }
Example 13
Source File: LearnDigitsBackprop.java From aifh with Apache License 2.0 | 4 votes |
/**
 * Trains an MNIST digit classifier (single 200-unit hidden layer with 50%
 * dropout) using early stopping and prints evaluation statistics for the
 * best model found.
 *
 * @param args Not used.
 */
public static void main(String[] args) {
    try {
        int seed = 43;
        double learningRate = 1e-2;
        // NOTE(review): nEpochs is never read — early stopping controls the
        // training length instead; confirm before deleting.
        int nEpochs = 50;
        int batchSize = 500;

        // Setup training data.
        System.out.println("Please wait, reading MNIST training data.");
        String dir = System.getProperty("user.dir");
        MNISTReader trainingReader = MNIST.loadMNIST(dir, true);
        MNISTReader validationReader = MNIST.loadMNIST(dir, false);

        DataSet trainingSet = trainingReader.getData();
        DataSet validationSet = validationReader.getData();

        DataSetIterator trainSetIterator = new ListDataSetIterator(trainingSet.asList(), batchSize);
        DataSetIterator validationSetIterator = new ListDataSetIterator(validationSet.asList(), validationReader.getNumRows());

        System.out.println("Training set size: " + trainingReader.getNumImages());
        System.out.println("Validation set size: " + validationReader.getNumImages());

        System.out.println(trainingSet.get(0).getFeatures().size(1));
        System.out.println(validationSet.get(0).getFeatures().size(1));

        int numInputs = trainingReader.getNumCols() * trainingReader.getNumRows();
        int numOutputs = 10;
        int numHiddenNodes = 200;

        // Create neural network.
        MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder()
                .seed(seed)
                .iterations(1)
                .optimizationAlgo(OptimizationAlgorithm.STOCHASTIC_GRADIENT_DESCENT)
                .learningRate(learningRate)
                .updater(Updater.NESTEROVS).momentum(0.9)
                .regularization(true).dropOut(0.50) // 50% dropout for regularization
                .list(2)
                .layer(0, new DenseLayer.Builder().nIn(numInputs).nOut(numHiddenNodes)
                        .weightInit(WeightInit.XAVIER)
                        .activation("relu")
                        .build())
                .layer(1, new OutputLayer.Builder(LossFunction.NEGATIVELOGLIKELIHOOD)
                        .weightInit(WeightInit.XAVIER)
                        .activation("softmax")
                        .nIn(numHiddenNodes).nOut(numOutputs).build())
                .pretrain(false).backprop(true).build();

        MultiLayerNetwork model = new MultiLayerNetwork(conf);
        model.init();
        model.setListeners(new ScoreIterationListener(1));

        // Define when we want to stop training.
        EarlyStoppingModelSaver saver = new InMemoryModelSaver();
        EarlyStoppingConfiguration esConf = new EarlyStoppingConfiguration.Builder()
                //.epochTerminationConditions(new MaxEpochsTerminationCondition(10))
                .epochTerminationConditions(new ScoreImprovementEpochTerminationCondition(5))
                .evaluateEveryNEpochs(1)
                .scoreCalculator(new DataSetLossCalculator(validationSetIterator, true)) //Calculate test set score
                .modelSaver(saver)
                .build();
        EarlyStoppingTrainer trainer = new EarlyStoppingTrainer(esConf, conf, trainSetIterator);

        // Train and display result.
        EarlyStoppingResult result = trainer.fit();
        System.out.println("Termination reason: " + result.getTerminationReason());
        System.out.println("Termination details: " + result.getTerminationDetails());
        System.out.println("Total epochs: " + result.getTotalEpochs());
        System.out.println("Best epoch number: " + result.getBestModelEpoch());
        System.out.println("Score at best epoch: " + result.getBestModelScore());
        model = saver.getBestModel();

        // Evaluate the best saved model on the validation set.
        Evaluation eval = new Evaluation(numOutputs);
        validationSetIterator.reset();
        for (int i = 0; i < validationSet.numExamples(); i++) {
            DataSet t = validationSet.get(i);
            INDArray features = t.getFeatureMatrix();
            INDArray labels = t.getLabels();
            INDArray predicted = model.output(features, false);
            eval.eval(labels, predicted);
        }

        //Print the evaluation statistics
        System.out.println(eval.stats());
    } catch (Exception ex) {
        ex.printStackTrace();
    }
}
Example 14
Source File: MLPMnistTwoLayerExample.java From dl4j-tutorials with MIT License | 4 votes |
/**
 * Trains a two-hidden-layer MLP (784-500-100-10) on MNIST for 15 epochs and
 * logs evaluation statistics over the test set.
 */
public static void main(String[] args) throws Exception {
    //number of rows and columns in the input pictures
    final int numRows = 28;
    final int numColumns = 28;
    int outputNum = 10; // number of output classes
    int batchSize = 64; // batch size for each epoch
    int rngSeed = 123; // random number seed for reproducibility
    int numEpochs = 15; // number of epochs to perform
    double rate = 0.0015; // learning rate

    //Get the DataSetIterators:
    DataSetIterator mnistTrain = new MnistDataSetIterator(batchSize, true, rngSeed);
    DataSetIterator mnistTest = new MnistDataSetIterator(batchSize, false, rngSeed);

    log.info("Build model....");
    MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder()
            .seed(rngSeed) //include a random seed for reproducibility
            .activation(Activation.RELU)
            .weightInit(WeightInit.XAVIER)
            .updater(new Nesterovs(rate, 0.98))
            .l2(rate * 0.005) // regularize learning model
            .list()
            .layer(0, new DenseLayer.Builder() //create the first input layer.
                    .nIn(numRows * numColumns)
                    .nOut(500)
                    .build())
            .layer(1, new DenseLayer.Builder() //create the second input layer
                    .nIn(500)
                    .nOut(100)
                    .build())
            .layer(2, new OutputLayer.Builder(LossFunction.NEGATIVELOGLIKELIHOOD) //create hidden layer
                    .activation(Activation.SOFTMAX)
                    .nIn(100)
                    .nOut(outputNum)
                    .build())
            .pretrain(false).backprop(true) //use backpropagation to adjust weights
            .build();

    MultiLayerNetwork model = new MultiLayerNetwork(conf);
    model.init();
    model.setListeners(new ScoreIterationListener(5)); //print the score with every iteration

    log.info("Train model....");
    for (int i = 0; i < numEpochs; i++) {
        log.info("Epoch " + i);
        model.fit(mnistTrain);
    }

    log.info("Evaluate model....");
    Evaluation eval = new Evaluation(outputNum); //create an evaluation object with 10 possible classes
    while (mnistTest.hasNext()) {
        DataSet next = mnistTest.next();
        INDArray output = model.output(next.getFeatures()); //get the networks prediction
        eval.eval(next.getLabels(), output); //check the prediction against the true class
    }

    log.info(eval.stats());
    log.info("****************Example finished********************");
}
Example 15
Source File: MLPMnistUIExample.java From dl4j-tutorials with MIT License | 4 votes |
/**
 * Single-hidden-layer MNIST MLP wired to the DL4J training UI: training
 * statistics are streamed to an in-memory StatsStorage and visualized by
 * UIServer while the model trains, then evaluation stats are logged.
 */
public static void main(String[] args) throws IOException {
    //number of rows and columns in the input pictures
    final int numRows = 28;
    final int numColumns = 28;
    int outputNum = 10; // number of output classes
    int batchSize = 128; // batch size for each epoch
    int rngSeed = 123; // random number seed for reproducibility
    int numEpochs = 15; // number of epochs to perform
    int listenerFrequency = 1;

    //Get the DataSetIterators:
    DataSetIterator mnistTrain = new MnistDataSetIterator(batchSize, true, rngSeed);
    DataSetIterator mnistTest = new MnistDataSetIterator(batchSize, false, rngSeed);

    log.info("Build model....");
    MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder()
            .seed(rngSeed) //include a random seed for reproducibility
            // use stochastic gradient descent as an optimization algorithm
            .updater(new Nesterovs(0.006, 0.9))
            .l2(1e-4)
            .list()
            .layer(0, new DenseLayer.Builder() //create the first, input layer with xavier initialization
                    // batchSize, features
                    .nIn(numRows * numColumns)
                    .nOut(1000)
                    .activation(Activation.RELU)
                    .weightInit(WeightInit.XAVIER)
                    .build())
            .layer(1, new OutputLayer.Builder(LossFunctions.LossFunction.NEGATIVELOGLIKELIHOOD) //create hidden layer
                    .nIn(1000)
                    .nOut(outputNum)
                    .activation(Activation.SOFTMAX)
                    .weightInit(WeightInit.XAVIER)
                    .build())
            .pretrain(false).backprop(true) //use backpropagation to adjust weights
            .build();

    MultiLayerNetwork model = new MultiLayerNetwork(conf);

    //Initialize the user interface backend (obtain the singleton UI server instance)
    UIServer uiServer = UIServer.getInstance();

    //Configure where the network information (gradients, activations, score vs. time etc) is to be stored
    //Then add the StatsListener to collect this information from the network, as it trains
    //In-memory storage location for the training stats
    StatsStorage statsStorage = new InMemoryStatsStorage(); //Alternative: new FileStatsStorage(File) - see UIStorageExample

    //Attach the StatsStorage instance to the UI: this allows the contents of the StatsStorage to be visualized
    uiServer.attach(statsStorage);

    model.init();
    //print the score with every 1 iteration
    model.setListeners(new StatsListener(statsStorage, listenerFrequency), new ScoreIterationListener(1));

    log.info("Train model....");
    for (int i = 0; i < numEpochs; i++) {
        model.fit(mnistTrain);
    }

    log.info("Evaluate model....");
    Evaluation eval = new Evaluation(outputNum); //create an evaluation object with 10 possible classes
    while (mnistTest.hasNext()) {
        DataSet next = mnistTest.next();
        INDArray output = model.output(next.getFeatures(), false); //get the networks prediction
        eval.eval(next.getLabels(), output); //check the prediction against the true class
    }
    log.info(eval.stats());
    log.info("****************Example finished********************");
}
Example 16
Source File: DataSetIteratorTest.java From deeplearning4j with Apache License 2.0 | 4 votes |
public void runCifar(boolean preProcessCifar) throws Exception { final int height = 32; final int width = 32; int channels = 3; int outputNum = CifarLoader.NUM_LABELS; int batchSize = 5; int seed = 123; int listenerFreq = 1; Cifar10DataSetIterator cifar = new Cifar10DataSetIterator(batchSize); MultiLayerConfiguration.Builder builder = new NeuralNetConfiguration.Builder().seed(seed) .gradientNormalization(GradientNormalization.RenormalizeL2PerLayer) .optimizationAlgo(OptimizationAlgorithm.STOCHASTIC_GRADIENT_DESCENT).list() .layer(0, new ConvolutionLayer.Builder(5, 5).nIn(channels).nOut(6).weightInit(WeightInit.XAVIER) .activation(Activation.RELU).build()) .layer(1, new SubsamplingLayer.Builder(SubsamplingLayer.PoolingType.MAX, new int[] {2, 2}) .build()) .layer(2, new OutputLayer.Builder(LossFunctions.LossFunction.NEGATIVELOGLIKELIHOOD) .nOut(outputNum).weightInit(WeightInit.XAVIER).activation(Activation.SOFTMAX) .build()) .setInputType(InputType.convolutionalFlat(height, width, channels)); MultiLayerNetwork model = new MultiLayerNetwork(builder.build()); model.init(); //model.setListeners(Arrays.asList((TrainingListener) new ScoreIterationListener(listenerFreq))); CollectScoresIterationListener listener = new CollectScoresIterationListener(listenerFreq); model.setListeners(listener); model.fit(cifar); cifar = new Cifar10DataSetIterator(batchSize); Evaluation eval = new Evaluation(cifar.getLabels()); while (cifar.hasNext()) { DataSet testDS = cifar.next(batchSize); INDArray output = model.output(testDS.getFeatures()); eval.eval(testDS.getLabels(), output); } // System.out.println(eval.stats(true)); listener.exportScores(System.out); }
Example 17
Source File: LearnDigitsDropout.java From aifh with Apache License 2.0 | 4 votes |
/**
 * Trains an MNIST digit classifier (single 100-unit hidden layer) using
 * early stopping and prints evaluation statistics for the best model found.
 *
 * @param args Not used.
 */
public static void main(String[] args) {
    try {
        int seed = 43;
        double learningRate = 1e-2;
        // NOTE(review): nEpochs is never read — early stopping controls the
        // training length instead; confirm before deleting.
        int nEpochs = 50;
        int batchSize = 500;

        // Setup training data.
        System.out.println("Please wait, reading MNIST training data.");
        String dir = System.getProperty("user.dir");
        MNISTReader trainingReader = MNIST.loadMNIST(dir, true);
        MNISTReader validationReader = MNIST.loadMNIST(dir, false);

        DataSet trainingSet = trainingReader.getData();
        DataSet validationSet = validationReader.getData();

        DataSetIterator trainSetIterator = new ListDataSetIterator(trainingSet.asList(), batchSize);
        DataSetIterator validationSetIterator = new ListDataSetIterator(validationSet.asList(), validationReader.getNumRows());

        System.out.println("Training set size: " + trainingReader.getNumImages());
        System.out.println("Validation set size: " + validationReader.getNumImages());

        System.out.println(trainingSet.get(0).getFeatures().size(1));
        System.out.println(validationSet.get(0).getFeatures().size(1));

        int numInputs = trainingReader.getNumCols() * trainingReader.getNumRows();
        int numOutputs = 10;
        int numHiddenNodes = 100;

        // Create neural network.
        // NOTE(review): despite the class name (LearnDigitsDropout) no dropout
        // is configured here — confirm whether .dropOut(...) was intended.
        MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder()
                .seed(seed)
                .iterations(1)
                .optimizationAlgo(OptimizationAlgorithm.STOCHASTIC_GRADIENT_DESCENT)
                .learningRate(learningRate)
                .updater(Updater.NESTEROVS).momentum(0.9)
                .list(2)
                .layer(0, new DenseLayer.Builder().nIn(numInputs).nOut(numHiddenNodes)
                        .weightInit(WeightInit.XAVIER)
                        .activation("relu")
                        .build())
                .layer(1, new OutputLayer.Builder(LossFunction.NEGATIVELOGLIKELIHOOD)
                        .weightInit(WeightInit.XAVIER)
                        .activation("softmax")
                        .nIn(numHiddenNodes).nOut(numOutputs).build())
                .pretrain(false).backprop(true).build();

        MultiLayerNetwork model = new MultiLayerNetwork(conf);
        model.init();
        model.setListeners(new ScoreIterationListener(1));

        // Define when we want to stop training.
        EarlyStoppingModelSaver saver = new InMemoryModelSaver();
        EarlyStoppingConfiguration esConf = new EarlyStoppingConfiguration.Builder()
                //.epochTerminationConditions(new MaxEpochsTerminationCondition(10))
                .epochTerminationConditions(new ScoreImprovementEpochTerminationCondition(5))
                .evaluateEveryNEpochs(1)
                .scoreCalculator(new DataSetLossCalculator(validationSetIterator, true)) //Calculate test set score
                .modelSaver(saver)
                .build();
        EarlyStoppingTrainer trainer = new EarlyStoppingTrainer(esConf, conf, trainSetIterator);

        // Train and display result.
        EarlyStoppingResult result = trainer.fit();
        System.out.println("Termination reason: " + result.getTerminationReason());
        System.out.println("Termination details: " + result.getTerminationDetails());
        System.out.println("Total epochs: " + result.getTotalEpochs());
        System.out.println("Best epoch number: " + result.getBestModelEpoch());
        System.out.println("Score at best epoch: " + result.getBestModelScore());
        model = saver.getBestModel();

        // Evaluate the best saved model on the validation set.
        Evaluation eval = new Evaluation(numOutputs);
        validationSetIterator.reset();
        for (int i = 0; i < validationSet.numExamples(); i++) {
            DataSet t = validationSet.get(i);
            INDArray features = t.getFeatureMatrix();
            INDArray labels = t.getLabels();
            INDArray predicted = model.output(features, false);
            eval.eval(labels, predicted);
        }

        //Print the evaluation statistics
        System.out.println(eval.stats());
    } catch (Exception ex) {
        ex.printStackTrace();
    }
}
Example 18
Source File: ParallelInferenceTest.java From deeplearning4j with Apache License 2.0 | 4 votes |
protected void evalClassifcationMultipleThreads(@NonNull ParallelInference inf, @NonNull DataSetIterator iterator, int numThreads) throws Exception { DataSet ds = iterator.next(); log.info("NumColumns: {}", ds.getLabels().columns()); iterator.reset(); Evaluation eval = new Evaluation(ds.getLabels().columns()); final Queue<DataSet> dataSets = new LinkedBlockingQueue<>(); final Queue<Pair<INDArray, INDArray>> outputs = new LinkedBlockingQueue<>(); int cnt = 0; // first of all we'll build datasets while (iterator.hasNext() && cnt < 256) { ds = iterator.next(); dataSets.add(ds); cnt++; } // now we'll build outputs in parallel Thread[] threads = new Thread[numThreads]; for (int i = 0; i < numThreads; i++) { threads[i] = new Thread(new Runnable() { @Override public void run() { DataSet ds; while ((ds = dataSets.poll()) != null) { INDArray output = inf.output(ds); outputs.add(Pair.makePair(ds.getLabels(), output)); } } }); } for (int i = 0; i < numThreads; i++) { threads[i].start(); } for (int i = 0; i < numThreads; i++) { threads[i].join(); } // and now we'll evaluate in single thread once again Pair<INDArray, INDArray> output; while ((output = outputs.poll()) != null) { eval.eval(output.getFirst(), output.getSecond()); } log.info(eval.stats()); }
Example 19
Source File: IrisClassifier.java From tutorials with MIT License | 4 votes |
public static void main(String[] args) throws IOException, InterruptedException { DataSet allData; try (RecordReader recordReader = new CSVRecordReader(0, ',')) { recordReader.initialize(new FileSplit(new ClassPathResource("iris.txt").getFile())); DataSetIterator iterator = new RecordReaderDataSetIterator(recordReader, 150, FEATURES_COUNT, CLASSES_COUNT); allData = iterator.next(); } allData.shuffle(42); DataNormalization normalizer = new NormalizerStandardize(); normalizer.fit(allData); normalizer.transform(allData); SplitTestAndTrain testAndTrain = allData.splitTestAndTrain(0.65); DataSet trainingData = testAndTrain.getTrain(); DataSet testData = testAndTrain.getTest(); MultiLayerConfiguration configuration = new NeuralNetConfiguration.Builder() .iterations(1000) .activation(Activation.TANH) .weightInit(WeightInit.XAVIER) .regularization(true) .learningRate(0.1).l2(0.0001) .list() .layer(0, new DenseLayer.Builder().nIn(FEATURES_COUNT).nOut(3) .build()) .layer(1, new DenseLayer.Builder().nIn(3).nOut(3) .build()) .layer(2, new OutputLayer.Builder(LossFunctions.LossFunction.NEGATIVELOGLIKELIHOOD) .activation(Activation.SOFTMAX) .nIn(3).nOut(CLASSES_COUNT).build()) .backpropType(BackpropType.Standard).pretrain(false) .build(); MultiLayerNetwork model = new MultiLayerNetwork(configuration); model.init(); model.fit(trainingData); INDArray output = model.output(testData.getFeatures()); Evaluation eval = new Evaluation(CLASSES_COUNT); eval.eval(testData.getLabels(), output); System.out.println(eval.stats()); }
Example 20
Source File: LossLayer.java From deeplearning4j with Apache License 2.0 | 3 votes |
/** * Returns the f1 score for the given examples. * Think of this to be like a percentage right. * The higher the number the more it got right. * This is on a scale from 0 to 1. * * @param examples te the examples to classify (one example in each row) * @param labels the true labels * @return the scores for each ndarray */ @Override public double f1Score(INDArray examples, INDArray labels) { Evaluation eval = new Evaluation(); eval.eval(labels, activate(examples, false, LayerWorkspaceMgr.noWorkspacesImmutable())); return eval.f1(); }