org.neuroph.nnet.MultiLayerPerceptron Java Examples
The following examples show how to use
org.neuroph.nnet.MultiLayerPerceptron.
These examples are taken from open-source projects; follow the link above each example to view the original project or source file.
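As a quick orientation before the examples: the typical MultiLayerPerceptron workflow is construct, train on a DataSet, then feed inputs and read outputs. The minimal XOR sketch below (not taken from any of the projects that follow; the class name and layer sizes are illustrative) shows that core API:

import org.neuroph.core.data.DataSet;
import org.neuroph.core.data.DataSetRow;
import org.neuroph.nnet.MultiLayerPerceptron;
import org.neuroph.util.TransferFunctionType;

import java.util.Arrays;

public class MlpQuickStart {
    public static void main(String[] args) {
        // XOR training set: 2 inputs, 1 output
        DataSet trainingSet = new DataSet(2, 1);
        trainingSet.add(new DataSetRow(new double[]{0, 0}, new double[]{0}));
        trainingSet.add(new DataSetRow(new double[]{0, 1}, new double[]{1}));
        trainingSet.add(new DataSetRow(new double[]{1, 0}, new double[]{1}));
        trainingSet.add(new DataSetRow(new double[]{1, 1}, new double[]{0}));

        // 2 input neurons, one hidden layer of 3 neurons, 1 output neuron
        MultiLayerPerceptron mlp = new MultiLayerPerceptron(TransferFunctionType.SIGMOID, 2, 3, 1);
        mlp.learn(trainingSet); // train with the default backpropagation-based learning rule

        // run the trained network on each row and print its output
        for (DataSetRow row : trainingSet.getRows()) {
            mlp.setInput(row.getInput());
            mlp.calculate();
            System.out.println(Arrays.toString(row.getInput()) + " -> " + Arrays.toString(mlp.getOutput()));
        }
    }
}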
Example #1
Source File: MomentumBackPropagationTest.java From NeurophFramework with Apache License 2.0 | 7 votes |
@Test
public void testXorMSE() {
    MultiLayerPerceptron myMlPerceptron = new MultiLayerPerceptron(TransferFunctionType.SIGMOID, 2, 3, 1);
    myMlPerceptron.randomizeWeights(new WeightsRandomizer(new Random(123)));
    myMlPerceptron.setLearningRule(instance);
    myMlPerceptron.learn(xorDataSet);

    MeanSquaredError mse = new MeanSquaredError();
    for (DataSetRow testSetRow : xorDataSet.getRows()) {
        myMlPerceptron.setInput(testSetRow.getInput());
        myMlPerceptron.calculate();
        double[] networkOutput = myMlPerceptron.getOutput();
        mse.addPatternError(networkOutput, testSetRow.getDesiredOutput());
    }
    assertTrue(mse.getTotalError() < maxError);
}
Example #2
Source File: NeuralNetworkFactory.java From NeurophFramework with Apache License 2.0 | 6 votes |
/**
 * Creates and returns a new instance of Multi Layer Perceptron
 * @param layersStr space separated number of neurons in layers
 * @param transferFunctionType transfer function type for neurons
 * @param learningRule class of the learning rule to set on the network
 * @param useBias true to use bias neurons
 * @param connectIO true to directly connect input and output layers
 * @return instance of Multi Layer Perceptron
 */
public static MultiLayerPerceptron createMLPerceptron(String layersStr, TransferFunctionType transferFunctionType,
        Class learningRule, boolean useBias, boolean connectIO) {
    ArrayList<Integer> layerSizes = VectorParser.parseInteger(layersStr);
    NeuronProperties neuronProperties = new NeuronProperties(transferFunctionType, useBias);
    MultiLayerPerceptron nnet = new MultiLayerPerceptron(layerSizes, neuronProperties);

    // set learning rule - TODO: use reflection here
    if (learningRule.getName().equals(BackPropagation.class.getName())) {
        nnet.setLearningRule(new BackPropagation());
    } else if (learningRule.getName().equals(MomentumBackpropagation.class.getName())) {
        nnet.setLearningRule(new MomentumBackpropagation());
    } else if (learningRule.getName().equals(DynamicBackPropagation.class.getName())) {
        nnet.setLearningRule(new DynamicBackPropagation());
    } else if (learningRule.getName().equals(ResilientPropagation.class.getName())) {
        nnet.setLearningRule(new ResilientPropagation());
    }

    // connect io
    if (connectIO) {
        nnet.connectInputsToOutputs();
    }

    return nnet;
}
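For context, a call to this factory might look like the sketch below. The layer string "4 8 3" and the choice of BackPropagation are illustrative, not taken from the project:

// Hypothetical usage of the factory above: a 4-8-3 sigmoid network
// trained with plain backpropagation, with bias neurons and no direct
// input-to-output connections.
MultiLayerPerceptron nnet = NeuralNetworkFactory.createMLPerceptron(
        "4 8 3",                        // space-separated neuron counts per layer
        TransferFunctionType.SIGMOID,   // transfer function for all neurons
        BackPropagation.class,          // learning rule class
        true,                           // useBias
        false);                         // connectIO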
Example #3
Source File: IrisFlowers.java From NeurophFramework with Apache License 2.0 | 6 votes |
public void run() throws InterruptedException, ExecutionException {
    System.out.println("Creating training set...");
    // get path to training set
    String dataSetFile = "data_sets/iris_data_normalised.txt";
    int inputsCount = 4;
    int outputsCount = 3;

    // create training set from file
    DataSet dataSet = DataSet.createFromFile(dataSetFile, inputsCount, outputsCount, ",");
    // dataSet.setColumnNames(new String[]{"sepal.length", "sepal.width", "petal.length", "petal.width", "setosa", "versicolor", "virginica"});
    dataSet.setColumnNames(new String[]{"setosa", "versicolor", "virginica"});
    dataSet.shuffle();

    System.out.println("Creating neural network...");
    MultiLayerPerceptron neuralNet = new MultiLayerPerceptron(TransferFunctionType.TANH, inputsCount, 5, outputsCount);

    String[] classLabels = new String[]{"setosa", "versicolor", "virginica"};
    neuralNet.setOutputLabels(classLabels);

    KFoldCrossValidation crossVal = new KFoldCrossValidation(neuralNet, dataSet, 5);
    EvaluationResult totalResult = crossVal.run();
    List<FoldResult> cflist = crossVal.getResultsByFolds();
}
Example #4
Source File: MlpNetworkTrainer.java From developerWorks with Apache License 2.0 | 6 votes |
/**
 * Runs the specified network using the double array as input data. This data should
 * be normalized or things will get real weird, real quick.
 *
 * @param network
 *          The MLP network to be run
 * @param input
 *          The normalized input data to use to run the network
 * @return double[] - The network's "answers" from running the network. For the networks
 *         CTV'd with input data created by DataCreator, this means two doubles, whose range is
 *         between 0.0 and 1.0:
 *         <ol>
 *         <li>ret[0] - The probability the Home team wins (Away team loses)</li>
 *         <li>ret[1] - The probability the Home team loses (Away team wins)</li>
 *         </ol>
 */
protected double[] runNetwork(MultiLayerPerceptron network, double[] input) {
    double[] ret;
    network.setInput(input);
    network.calculate();
    // Return value is the network's output
    ret = network.getOutput();
    if (log.isTraceEnabled()) {
        StringBuilder sb = new StringBuilder();
        sb.append("Comparison: Input to Output:\n");
        sb.append("Input : ");
        sb.append(Arrays.toString(input));
        sb.append('\n');
        sb.append("Output: ");
        sb.append(Arrays.toString(ret));
        log.trace(sb.toString());
    }
    if (log.isTraceEnabled()) {
        log.trace("Network Input : " + Arrays.toString(input));
        log.trace("Network Output: " + Arrays.toString(ret));
    }
    return ret;
}
Example #5
Source File: ClassifierEvaluationSample.java From NeurophFramework with Apache License 2.0 | 6 votes |
public static void main(String[] args) {
    Evaluation evaluation = new Evaluation();
    evaluation.addEvaluator(new ErrorEvaluator(new MeanSquaredError()));
    String[] classNames = {"Virginica", "Setosa", "Versicolor"};

    MultiLayerPerceptron neuralNet = (MultiLayerPerceptron) NeuralNetwork.createFromFile("irisNet.nnet");
    DataSet dataSet = DataSet.createFromFile("data_sets/iris_data_normalised.txt", 4, 3, ",");

    evaluation.addEvaluator(new ClassifierEvaluator.MultiClass(classNames));
    evaluation.evaluate(neuralNet, dataSet);

    ClassifierEvaluator evaluator = evaluation.getEvaluator(ClassifierEvaluator.MultiClass.class);
    ConfusionMatrix confusionMatrix = evaluator.getResult();
    System.out.println("Confusion matrix:\r\n");
    System.out.println(confusionMatrix.toString() + "\r\n\r\n");
    System.out.println("Classification metrics\r\n");

    ClassificationMetrics[] metrics = ClassificationMetrics.createFromMatrix(confusionMatrix);
    ClassificationMetrics.Stats average = ClassificationMetrics.average(metrics);
    for (ClassificationMetrics cm : metrics) {
        System.out.println(cm.toString() + "\r\n");
    }
    System.out.println(average.toString());
}
Example #6
Source File: MlpNetworkTrainer.java From developerWorks with Apache License 2.0 | 6 votes |
/**
 * Train the specified MLP network using the specified training data, and store metrics
 * in the {@link NetworkMetrics} object associated with the network.
 *
 * @param trainingData
 *          The data used to train the network.
 * @param network
 *          The MLP network to be trained.
 */
private void trainNetwork(DataSet trainingData, MultiLayerPerceptron network) {
    //
    // Shuffle the training data. Adds an element of randomness to the data.
    trainingData.shuffle();
    //
    // Now learn, you!
    network.learn(trainingData);
    //
    // Learning complete. Set metrics.
    NetworkMetrics metrics = networkMetricsCache.get(network);
    metrics.setIterationLearnTime(System.currentTimeMillis() - metrics.getLearnStartTime());
    metrics.setTotalLearnTime(metrics.getTotalLearnTime() + metrics.getIterationLearnTime());
    metrics.setNumberOfAsymmetricWinsThisIteration(0);
    metrics.setNumberOfSymmetricWinsThisIteration(0);
    metrics.setNumberOfGamesThisIteration(0);
}
Example #7
Source File: MlpNetworkTrainer.java From developerWorks with Apache License 2.0 | 6 votes |
/**
 * Create the {@link NetworkMetrics} object. It is used to keep track of information about
 * the networks produced by this run.
 *
 * @param network
 *          The MLP network whose metrics object is to be fetched or created.
 *
 * @param yearsToSimulate
 *          The years for which simulations are to be run against the trained
 *          networks produced by the program to validate them.
 *
 * @param neuronLayerDescriptor
 *          The network descriptor; each Integer in the array represents
 *          the number of neurons in that layer. The 0th element represents the input layer, the last element
 *          represents the output layer, with hidden layers between them.
 *
 * @param neuronProperties
 *          The {@link NeuronProperties} Neuroph metadata object. Used when creating
 *          a new network as a convenient way to set a bunch of properties all at once.
 *
 * @return {@link NetworkMetrics} - the metrics object.
 */
private NetworkMetrics createNetworkMetrics(MultiLayerPerceptron network, Integer[] yearsToSimulate,
        List<Integer> neuronLayerDescriptor, NeuronProperties neuronProperties) {
    String neuronLayerDescriptorString = NetworkUtils.generateLayerStructureString(neuronLayerDescriptor);
    //
    // Create metrics
    log.info("*********** FETCHING NETWORK METRICS **************");
    NetworkMetrics metrics = networkMetricsCache.get(network);
    if (metrics == null) {
        log.info("*********** CREATED NEW NETWORK METRICS FOR THIS NETWORK (" + neuronLayerDescriptorString
                + ") **************");
        metrics = new NetworkMetrics();
        networkMetricsCache.put(network, metrics);
    }
    metrics.setNeuronProperties(neuronProperties);
    metrics.setIterationStartTime(System.currentTimeMillis());
    metrics.setLearnStartTime(System.currentTimeMillis());
    metrics.setLayerStructure(neuronLayerDescriptorString);
    metrics.setNumberOfIterationsSoFar(metrics.getNumberOfIterationsSoFar() + 1);
    metrics.setSimulationYears(yearsToSimulate);
    return metrics;
}
Example #8
Source File: TournamentMatrixPredictor.java From developerWorks with Apache License 2.0 | 6 votes |
/**
 * The "do it" method. Drives the entire matrix production.
 * Pretty cool, man.
 *
 * @param tournamentYear
 */
public void go(Integer tournamentYear) {
    //
    // Load the networks
    List<MultiLayerPerceptron> networks = loadNetworks();
    Set<String> teamNames = fetchTournamentTeams(tournamentYear);
    //
    // Now generate the matrix. Every team in the file against
    /// every other team in the file. This will let us make up
    /// different brackets without having to re-run the simulator.
    Map<String, List<GameSimulationResult<MultiLayerPerceptron>>> matrix =
            computeMatrix(tournamentYear, networks, teamNames);
    //
    // Write the matrix to CSV file (TODO: use POI?)
    writeMatrixFile(matrix);
}
Example #9
Source File: SunSpots.java From NeurophFramework with Apache License 2.0 | 6 votes |
public void run() {
    // uncomment the following line to use regular Neuroph (non-flat) processing
    //Neuroph.getInstance().setFlattenNetworks(false);

    // create neural network
    NeuralNetwork network = new MultiLayerPerceptron(TransferFunctionType.SIGMOID, WINDOW_SIZE, 10, 1);
    // normalize training data
    normalizeSunspots(0.1, 0.9);

    network.getLearningRule().addListener(this);

    // create training set
    DataSet trainingSet = generateTrainingData();
    network.learn(trainingSet);
    predict(network);

    Neuroph.getInstance().shutdown();
}
Example #10
Source File: MlpNetworkTrainer.java From developerWorks with Apache License 2.0 | 6 votes |
/**
 * Determines whether the specified network should be saved or not.
 * Keeps the logic separate for making this determination. Down the road I might
 * make this more complicated, or subclasses could determine this however they
 * want.
 *
 * @param network
 *
 * @return - Returns true if the network should be saved, false otherwise.
 */
protected boolean networkShouldBeSaved(MultiLayerPerceptron network) {
    //
    // Get the network metrics
    NetworkMetrics metrics = networkMetricsCache.get(network);
    //
    // Current method of determining whether or not the network should be saved:
    // Compute winning percentage, round it to an integer number, and compare it
    /// to the threshold value in NetworkProperties
    BigDecimal winningPercentage = BigDecimal
            .valueOf(100.0 * metrics.getNumberOfWinsThisIteration() / metrics.getNumberOfGamesThisIteration())
            .setScale(5, RoundingMode.HALF_UP);
    BigDecimal currentPerformance =
            BigDecimal.valueOf(winningPercentage.doubleValue()).setScale(0, RoundingMode.HALF_UP);
    //
    // Return true if network performed above threshold, false otherwise
    return currentPerformance.doubleValue() >= NetworkProperties.getPerformanceThreshold().doubleValue();
}
Example #11
Source File: TestTimeSeries.java From NeurophFramework with Apache License 2.0 | 6 votes |
public void train() {
    // get the path to file with data
    String inputFileName = "C:\\timeseries\\BSW15";

    // create MultiLayerPerceptron neural network
    neuralNet = new MultiLayerPerceptron(TransferFunctionType.TANH, 5, 10, 1);
    MomentumBackpropagation learningRule = (MomentumBackpropagation) neuralNet.getLearningRule();
    learningRule.setLearningRate(0.2);
    learningRule.setMomentum(0.5);
    // learningRule.addObserver(this);
    learningRule.addListener(this);

    // create training set from file
    trainingSet = DataSet.createFromFile(inputFileName, 5, 1, "\t", false);
    // train the network with training set
    neuralNet.learn(trainingSet);

    System.out.println("Done training.");
}
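The training file here is assumed to already contain windowed rows (5 inputs, 1 output per row). If you start from a raw series instead, you could build such a DataSet with a sliding window yourself. A minimal sketch, with made-up series values (assumes java.util.Arrays is imported):

// Build a 5-in / 1-out sliding-window DataSet from a raw time series.
// The values below are illustrative; in practice they should be
// normalized to the range of the network's transfer function.
double[] series = {0.12, 0.15, 0.11, 0.18, 0.22, 0.19, 0.25, 0.21};
int window = 5;
DataSet windowed = new DataSet(window, 1);
for (int i = 0; i + window < series.length; i++) {
    double[] input = Arrays.copyOfRange(series, i, i + window); // 5 consecutive values
    double[] output = {series[i + window]};                     // next value is the target
    windowed.add(new DataSetRow(input, output));
}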
Example #12
Source File: TestMatrixMLP.java From NeurophFramework with Apache License 2.0 | 6 votes |
/**
 * Create and run MLP with XOR training set
 */
public static void main(String[] args) {
    // create training set (logical XOR function)
    DataSet trainingSet = new DataSet(2, 1);
    trainingSet.add(new DataSetRow(new double[]{0, 0}, new double[]{0}));
    trainingSet.add(new DataSetRow(new double[]{0, 1}, new double[]{1}));
    trainingSet.add(new DataSetRow(new double[]{1, 0}, new double[]{1}));
    trainingSet.add(new DataSetRow(new double[]{1, 1}, new double[]{0}));

    MultiLayerPerceptron nnet = new MultiLayerPerceptron(TransferFunctionType.TANH, 2, 3, 1);
    MatrixMultiLayerPerceptron mnet = new MatrixMultiLayerPerceptron(nnet);

    System.out.println("Training network...");
    mnet.learn(trainingSet);
    System.out.println("Done training network.");
}
Example #13
Source File: RandomizationSample.java From NeurophFramework with Apache License 2.0 | 5 votes |
/**
 * Runs this sample
 */
public static void main(String[] args) {
    MultiLayerPerceptron neuralNet = new MultiLayerPerceptron(2, 3, 1);

    // neuralNet.randomizeWeights(new WeightsRandomizer());
    // neuralNet.randomizeWeights(new RangeRandomizer(0.1, 0.9));
    // neuralNet.randomizeWeights(new GaussianRandomizer(0.4, 0.3));
    neuralNet.randomizeWeights(new NguyenWidrowRandomizer(0.3, 0.7));
    printWeights(neuralNet);

    neuralNet.randomizeWeights(new DistortRandomizer(0.5));
    printWeights(neuralNet);
}
Example #14
Source File: BalanceScaleSample.java From NeurophFramework with Apache License 2.0 | 5 votes |
public void run() {
    System.out.println("Creating training set...");
    String dataSetFile = "data_sets/balance_scale_data.txt";
    int inputsCount = 20;
    int outputsCount = 3;

    // create training set from file
    DataSet dataSet = DataSet.createFromFile(dataSetFile, inputsCount, outputsCount, "\t", false);

    System.out.println("Creating neural network...");
    // create MultiLayerPerceptron neural network
    MultiLayerPerceptron neuralNet = new MultiLayerPerceptron(inputsCount, 22, outputsCount);

    // attach listener to learning rule
    MomentumBackpropagation learningRule = (MomentumBackpropagation) neuralNet.getLearningRule();
    learningRule.addListener(this);

    // set learning rate and max error
    learningRule.setLearningRate(0.2);
    learningRule.setMaxError(0.01);

    System.out.println("Training network...");
    // train the network with training set
    neuralNet.learn(dataSet);

    System.out.println("Training completed.");
    System.out.println("Testing network...");

    testNeuralNetwork(neuralNet, dataSet);

    System.out.println("Saving network");
    // save neural network to file
    neuralNet.save("MyNeuralNetBalanceScale.nnet");

    System.out.println("Done.");
}
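This and several of the following samples call a testNeuralNetwork(...) helper that isn't shown on this page. A minimal sketch of what such a helper typically does; this is an assumption, not the project's actual code:

// Sketch of a testNeuralNetwork(...) helper: run every row of the test set
// through the trained network and print actual vs. desired output.
public static void testNeuralNetwork(NeuralNetwork neuralNet, DataSet testSet) {
    for (DataSetRow row : testSet.getRows()) {
        neuralNet.setInput(row.getInput());
        neuralNet.calculate();
        System.out.println("Input: " + Arrays.toString(row.getInput())
                + " Output: " + Arrays.toString(neuralNet.getOutput())
                + " Desired: " + Arrays.toString(row.getDesiredOutput()));
    }
}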
Example #15
Source File: FileIOSample.java From NeurophFramework with Apache License 2.0 | 5 votes |
/**
 * Runs this sample
 */
public static void main(String[] args) throws FileNotFoundException, IOException {
    // create neural network
    MultiLayerPerceptron neuralNet = new MultiLayerPerceptron(2, 3, 1);

    // use file provided in org.neuroph.sample.data package
    String inputFileName = FileIOSample.class.getResource("data/xor_data.txt").getFile();
    // create file input adapter using specified file
    FileInputAdapter fileIn = new FileInputAdapter(inputFileName);
    // create file output adapter using specified file name
    FileOutputAdapter fileOut = new FileOutputAdapter("some_output_file.txt");

    double[] input; // input buffer used for reading network input from file
    // read network input using input adapter
    while ((input = fileIn.readInput()) != null) {
        // feed network with input
        neuralNet.setInput(input);
        // calculate network ...
        neuralNet.calculate();
        // .. and get network output
        double[] output = neuralNet.getOutput();
        // write network output using output adapter
        fileOut.writeOutput(output);
    }

    // close input and output files
    fileIn.close();
    fileOut.close();

    // Also note that a shorter way to do this is to use the org.neuroph.util.io.IOHelper class
}
Example #16
Source File: CarEvaluationSample.java From NeurophFramework with Apache License 2.0 | 5 votes |
public void run() {
    System.out.println("Creating training set...");
    String dataSetFile = "data_sets/car_evaluation_data.txt";
    int inputsCount = 21;
    int outputsCount = 4;

    // create training set from file
    DataSet dataSet = DataSet.createFromFile(dataSetFile, inputsCount, outputsCount, "\t", false);

    System.out.println("Creating neural network...");
    // create MultiLayerPerceptron neural network
    MultiLayerPerceptron neuralNet = new MultiLayerPerceptron(inputsCount, 22, outputsCount);

    // attach listener to learning rule
    MomentumBackpropagation learningRule = (MomentumBackpropagation) neuralNet.getLearningRule();
    learningRule.addListener(this);

    // set learning rate and max error
    learningRule.setLearningRate(0.2);
    learningRule.setMaxError(0.01);

    System.out.println("Training network...");
    // train the network with training set
    neuralNet.learn(dataSet);

    System.out.println("Training completed.");
    System.out.println("Testing network...");

    testNeuralNetwork(neuralNet, dataSet);

    System.out.println("Saving network");
    // save neural network to file
    neuralNet.save("MyNeuralNetCarEvaluation.nnet");

    System.out.println("Done.");
}
Example #17
Source File: PredictingTheReligionSample.java From NeurophFramework with Apache License 2.0 | 5 votes |
public void run() {
    System.out.println("Creating training set...");
    // get path to training set
    String dataSetFile = "data_sets/religion_data.txt";
    int inputsCount = 54;
    int outputsCount = 5;

    // create training set from file
    DataSet dataSet = DataSet.createFromFile(dataSetFile, inputsCount, outputsCount, "\t", false);

    System.out.println("Creating neural network...");
    // create MultiLayerPerceptron neural network
    MultiLayerPerceptron neuralNet = new MultiLayerPerceptron(inputsCount, 22, outputsCount);

    // attach listener to learning rule
    MomentumBackpropagation learningRule = (MomentumBackpropagation) neuralNet.getLearningRule();
    learningRule.addListener(this);

    // set learning rate and max error
    learningRule.setLearningRate(0.2);
    learningRule.setMaxError(0.01);

    System.out.println("Training network...");
    // train the network with training set
    neuralNet.learn(dataSet);

    System.out.println("Training completed.");
    System.out.println("Testing network...");

    testNeuralNetwork(neuralNet, dataSet);

    System.out.println("Saving network");
    // save neural network to file
    neuralNet.save("MyNeuralNetReligion.nnet");

    System.out.println("Done.");
}
Example #18
Source File: PredictingPokerHandsSample.java From NeurophFramework with Apache License 2.0 | 5 votes |
public void run() {
    System.out.println("Creating training set...");
    String dataSetFile = "data_sets/predicting_poker_hands_data.txt";
    int inputsCount = 85;
    int outputsCount = 9;

    // create training set from file
    DataSet dataSet = DataSet.createFromFile(dataSetFile, inputsCount, outputsCount, "\t", false);

    System.out.println("Creating neural network...");
    // create MultiLayerPerceptron neural network
    MultiLayerPerceptron neuralNet = new MultiLayerPerceptron(inputsCount, 65, outputsCount);

    // attach listener to learning rule
    MomentumBackpropagation learningRule = (MomentumBackpropagation) neuralNet.getLearningRule();
    learningRule.addListener(this);

    // set learning rate and max error
    learningRule.setLearningRate(0.2);
    learningRule.setMaxError(0.01);

    System.out.println("Training network...");
    // train the network with training set
    neuralNet.learn(dataSet);

    System.out.println("Training completed.");
    System.out.println("Testing network...");

    testNeuralNetwork(neuralNet, dataSet);

    System.out.println("Saving network");
    // save neural network to file
    neuralNet.save("MyNeuralNetPokerHands.nnet");

    System.out.println("Done.");
}
Example #19
Source File: ImageRecognitionHelper.java From NeurophFramework with Apache License 2.0 | 5 votes |
/**
 * Creates and returns new neural network for image recognition.
 * Assumes that all of the FractionRgbData objects in the given map have identical
 * length arrays in them so that the input layer of the neural network can be
 * created here.
 *
 * @param label neural network label
 * @param samplingResolution sampling resolution (image size)
 * @param imageLabels image labels
 * @param layersNeuronsCount neuron counts in hidden layers
 * @param transferFunctionType type of transfer function to use for neurons in network
 * @param colorMode color mode
 * @return neural network configured for image recognition
 */
public static NeuralNetwork createNewNeuralNetwork(String label, Dimension samplingResolution, ColorMode colorMode,
        List<String> imageLabels, List<Integer> layersNeuronsCount, TransferFunctionType transferFunctionType) {
    int numberOfInputNeurons;
    if ((colorMode == ColorMode.COLOR_RGB) || (colorMode == ColorMode.COLOR_HSL)) { // for full color rgb or hsl
        numberOfInputNeurons = 3 * samplingResolution.getWidth() * samplingResolution.getHeight();
    } else { // for black n white network
        numberOfInputNeurons = samplingResolution.getWidth() * samplingResolution.getHeight();
    }

    int numberOfOuputNeurons = imageLabels.size();

    layersNeuronsCount.add(0, numberOfInputNeurons);
    layersNeuronsCount.add(numberOfOuputNeurons);

    System.out.println("Neuron layer size counts vector = " + layersNeuronsCount);

    NeuralNetwork neuralNetwork = new MultiLayerPerceptron(layersNeuronsCount, transferFunctionType);
    neuralNetwork.setLabel(label);
    PluginBase imageRecognitionPlugin = new ImageRecognitionPlugin(samplingResolution, colorMode);
    neuralNetwork.addPlugin(imageRecognitionPlugin);

    assignLabelsToOutputNeurons(neuralNetwork, imageLabels);
    neuralNetwork.setLearningRule(new MomentumBackpropagation());

    return neuralNetwork;
}
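A call to this helper might look like the sketch below. The labels, resolution, and hidden-layer size are illustrative assumptions. Note that layersNeuronsCount must be a mutable list, since the method inserts the input and output layer sizes into it:

// Hypothetical call: recognize two shapes from 10x10 RGB samples,
// with one hidden layer of 12 neurons.
List<String> imageLabels = new ArrayList<>(Arrays.asList("circle", "square"));
List<Integer> hiddenLayers = new ArrayList<>(Arrays.asList(12)); // hidden layers only; I/O sizes are added by the method
NeuralNetwork imageNet = ImageRecognitionHelper.createNewNeuralNetwork(
        "shapeRecognizer",              // network label
        new Dimension(10, 10),          // sampling resolution (width x height)
        ColorMode.COLOR_RGB,            // 3 * 10 * 10 = 300 input neurons
        imageLabels,
        hiddenLayers,
        TransferFunctionType.SIGMOID);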
Example #20
Source File: WineClassificationSample.java From NeurophFramework with Apache License 2.0 | 5 votes |
public void run() {
    System.out.println("Creating training set...");
    // get path to training set
    String dataSetFile = "data_sets/wine_classification_data.txt";
    int inputsCount = 13;
    int outputsCount = 3;

    // create training set from file
    DataSet dataSet = DataSet.createFromFile(dataSetFile, inputsCount, outputsCount, "\t", false);

    System.out.println("Creating neural network...");
    // create MultiLayerPerceptron neural network
    MultiLayerPerceptron neuralNet = new MultiLayerPerceptron(inputsCount, 22, outputsCount);

    // attach listener to learning rule
    MomentumBackpropagation learningRule = (MomentumBackpropagation) neuralNet.getLearningRule();
    learningRule.addListener(this);

    // set learning rate and max error
    learningRule.setLearningRate(0.2);
    learningRule.setMaxError(0.01);

    System.out.println("Training network...");
    // train the network with training set
    neuralNet.learn(dataSet);

    System.out.println("Training completed.");
    System.out.println("Testing network...");

    testNeuralNetwork(neuralNet, dataSet);

    System.out.println("Saving network");
    // save neural network to file
    neuralNet.save("MyNeuralNetWineClassification.nnet");

    System.out.println("Done.");
}
Example #21
Source File: AnimalsClassificationSample.java From NeurophFramework with Apache License 2.0 | 5 votes |
public void run() {
    System.out.println("Creating training set...");
    String dataSetFile = "data_sets/animals_data.txt";
    int inputsCount = 20;
    int outputsCount = 7;

    // create training set from file
    DataSet dataSet = DataSet.createFromFile(dataSetFile, inputsCount, outputsCount, "\t", true);

    System.out.println("Creating neural network...");
    // create MultiLayerPerceptron neural network
    MultiLayerPerceptron neuralNet = new MultiLayerPerceptron(inputsCount, 22, outputsCount);

    // attach listener to learning rule
    MomentumBackpropagation learningRule = (MomentumBackpropagation) neuralNet.getLearningRule();
    learningRule.addListener(this);

    // set learning rate and max error
    learningRule.setLearningRate(0.2);
    learningRule.setMaxError(0.01);

    System.out.println("Training network...");
    // train the network with training set
    neuralNet.learn(dataSet);

    System.out.println("Training completed.");
    System.out.println("Testing network...");

    testNeuralNetwork(neuralNet, dataSet);

    System.out.println("Saving network");
    // save neural network to file
    neuralNet.save("MyNeuralNetAnimals.nnet");

    System.out.println("Done.");
}
Example #22
Source File: MlpNetworkTrainer.java From developerWorks with Apache License 2.0 | 5 votes |
/**
 * Figure out (compute) what the network predicted for the specified simulated historical
 * game. Returns true if the network predicted correctly, or false if it did not.
 *
 * @param network
 * @param tournamentResult
 * @param seasonDataHomeTeam
 * @param seasonDataAwayTeam
 * @param simulationIndex
 * @param networkOutput
 *
 * @return - Returns true if the network predicted correctly, or false if it did not.
 */
private boolean computeNetworkPrediction(MultiLayerPerceptron network, TournamentResult tournamentResult,
        SeasonData[] seasonDataHomeTeam, SeasonData[] seasonDataAwayTeam, int simulationIndex,
        double[] networkOutput) {
    boolean ret = false;
    // Default to LHS (index 0 in the output array)
    String predictedWinner = seasonDataHomeTeam[simulationIndex].getTeamName();
    String predictedLoser = seasonDataAwayTeam[simulationIndex].getTeamName();
    Double predictedWinningProbability = networkOutput[0];
    Double predictedLosingProbability = networkOutput[1];
    NetworkMetrics metrics = networkMetricsCache.get(network);
    if (networkOutput[1] >= networkOutput[0]) {
        // Unless RHS (index 1 in the output array) wins
        predictedWinner = seasonDataAwayTeam[simulationIndex].getTeamName();
        predictedLoser = seasonDataHomeTeam[simulationIndex].getTeamName();
        predictedWinningProbability = networkOutput[1];
        predictedLosingProbability = networkOutput[0];
    }
    //
    // If the predicted winner is the actual winner, the network picked correctly.
    if (predictedWinner.equalsIgnoreCase(tournamentResult.getWinningTeamName())) {
        ret = true;
    } else {
        // Otherwise, the network did not pick correctly
        metrics.getIncorrectPicks().add(tournamentResult);
    }
    if (log.isDebugEnabled()) {
        log.debug("Predicted results: " + predictedWinner + " should defeat " + predictedLoser
                + " (W/L Probabilities: " + predictedWinningProbability + " / " + predictedLosingProbability + "), "
                + "Actual winner: " + tournamentResult.getWinningTeamName() + "(" + tournamentResult.getGameDate()
                + ": def. " + tournamentResult.getLosingTeamName() + ": score " + tournamentResult.getWinningScore()
                + "-" + tournamentResult.getLosingScore() + ")");
    }
    return ret;
}
Example #23
Source File: MyBenchmarkTask.java From NeurophFramework with Apache License 2.0 | 5 votes |
/**
 * Benchmark preparation consists of training set and neural network creation.
 * This method generates a training set with 100 rows, where every row has 10 input and 5 output elements.
 * The neural network has two hidden layers with 8 and 7 neurons, and runs the learning rule for 2000 iterations.
 */
@Override
public void prepareTest() {
    int trainingSetSize = 100;
    int inputSize = 10;
    int outputSize = 5;

    this.trainingSet = new DataSet(inputSize, outputSize);

    for (int i = 0; i < trainingSetSize; i++) {
        double[] input = new double[inputSize];
        for (int j = 0; j < inputSize; j++)
            input[j] = Math.random();

        double[] output = new double[outputSize];
        for (int j = 0; j < outputSize; j++)
            output[j] = Math.random();

        DataSetRow trainingSetRow = new DataSetRow(input, output);
        trainingSet.add(trainingSetRow);
    }

    network = new MultiLayerPerceptron(inputSize, 8, 7, outputSize);
    ((MomentumBackpropagation) network.getLearningRule()).setMaxIterations(2000);
}
Example #24
Source File: GlassIdentificationSample.java From NeurophFramework with Apache License 2.0 | 5 votes |
public void run() {
    System.out.println("Creating training set...");
    String dataSetFile = "data_sets/glass_identification_data.txt";
    int inputsCount = 9;
    int outputsCount = 7;

    // create training set from file
    DataSet dataSet = DataSet.createFromFile(dataSetFile, inputsCount, outputsCount, "\t", false);
    //dataSet.normalize();

    System.out.println("Creating neural network...");
    // create MultiLayerPerceptron neural network
    MultiLayerPerceptron neuralNet = new MultiLayerPerceptron(inputsCount, 22, outputsCount);

    // attach listener to learning rule
    MomentumBackpropagation learningRule = (MomentumBackpropagation) neuralNet.getLearningRule();
    learningRule.addListener(this);

    // set learning rate and max error
    learningRule.setLearningRate(0.1);
    learningRule.setMaxError(0.01);

    System.out.println("Training network...");
    // train the network with training set
    neuralNet.learn(dataSet);

    System.out.println("Training completed.");
    System.out.println("Testing network...");

    testNeuralNetwork(neuralNet, dataSet);

    System.out.println("Saving network");
    // save neural network to file
    neuralNet.save("MyNeuralGlassIdentification.nnet");

    System.out.println("Done.");
}
Example #25
Source File: MlpNetworkTrainer.java From developerWorks with Apache License 2.0 | 5 votes |
/**
 * Using the specified <em>trained</em> <code>network</code>, run a simulation of the two teams in the
 * specified <code>tournamentResult</code>, to see what happens. For now, we're just trying to see how
 * good the network is.
 *
 * A single execution of this method represents a single simulation of a single historical
 * tournament game, using a trained network.
 *
 * @param network
 *          The trained MLP network to run in the simulation.
 *
 * @param seasonAnalytics
 *          The {@link SeasonAnalytics} object, used to normalize the data.
 *
 * @param tournamentResult
 *          The historical tournament game that will be simulated. Since it
 *          already happened, we know, well, what happened, which is exactly what we need when validating
 *          the network.
 *
 * @return int - The number of correct picks for this simulation.
 */
protected int computeSimulatedGameCorrectPicks(MultiLayerPerceptron network, SeasonAnalytics seasonAnalytics,
        TournamentResult tournamentResult) {
    //
    // Get the network metrics
    NetworkMetrics metrics = networkMetricsCache.get(network);
    //
    // Now simulate a single historical tournament game and see how many picks were correct.
    int numberOfCorrectPicks = simulateSingleHistoricalTournamentGame(network, seasonAnalytics, tournamentResult);
    //
    // Symmetric wins/losses are wins/losses where the same team wins/loses as both home (LHS) and away (RHS).
    /// This means the relationships in the network picked them as a winner/loser regardless of LHS/RHS,
    /// so for a symmetric win/loss there is no positional bias (that's the idea, anyway).
    // An asymmetric win/loss is one where the team won as LHS and lost as RHS (or vice versa).
    if (numberOfCorrectPicks == 2) {
        // Increment number of symmetric wins
        metrics.setNumberOfSymmetricWinsThisIteration(
                metrics.getNumberOfSymmetricWinsThisIteration() + numberOfCorrectPicks);
        metrics.setTotalNumberOfSymmetricWins(metrics.getTotalNumberOfSymmetricWins() + numberOfCorrectPicks);
    } else if (numberOfCorrectPicks == 0) {
        // Increment number of symmetric losses
        metrics.setNumberOfSymmetricLossesThisIteration(metrics.getNumberOfSymmetricLossesThisIteration() + 2);
        metrics.setTotalNumberOfSymmetricLosses(metrics.getTotalNumberOfSymmetricLosses() + 2);
    } else {
        // Increment number of asymmetric wins
        metrics.setNumberOfAsymmetricWinsThisIteration(
                metrics.getNumberOfAsymmetricWinsThisIteration() + numberOfCorrectPicks);
        metrics.setTotalNumberOfAsymmetricWins(metrics.getTotalNumberOfAsymmetricWins() + numberOfCorrectPicks);
    }
    return numberOfCorrectPicks;
}
Example #26
Source File: PredictingPerformanceOfCPUSample.java From NeurophFramework with Apache License 2.0 | 5 votes |
public void run() {
    System.out.println("Creating training set...");
    String dataSetFile = "data_sets/cpu_data.txt";
    int inputsCount = 7;
    int outputsCount = 1;

    // create training set from file
    DataSet dataSet = DataSets.readFromCsv(dataSetFile, inputsCount, outputsCount);
    // normalize dataset
    DataSets.normalizeMax(dataSet);

    System.out.println("Creating neural network...");
    // create MultiLayerPerceptron neural network
    MultiLayerPerceptron neuralNet = new MultiLayerPerceptron(inputsCount, 16, outputsCount);

    // attach listener to learning rule
    MomentumBackpropagation learningRule = (MomentumBackpropagation) neuralNet.getLearningRule();
    learningRule.addListener(this);

    // set learning rate and max error
    learningRule.setLearningRate(0.2);
    learningRule.setMaxError(0.01);

    System.out.println("Training network...");
    // train the network with training set
    neuralNet.learn(dataSet);

    System.out.println("Training completed.");
    System.out.println("Testing network...");

    testNeuralNetwork(neuralNet, dataSet);

    System.out.println("Saving network");
    // save neural network to file
    neuralNet.save("MyNeuralNetCPU.nnet");

    System.out.println("Done.");
}
Example #27
Source File: ConcreteStrenghtTestSample.java From NeurophFramework with Apache License 2.0 | 5 votes |
public void run() {
    System.out.println("Creating training set...");
    String dataSetFile = "data_sets/concrete_strenght_test_data.txt";
    int inputsCount = 8;
    int outputsCount = 1;

    // create training set from file
    DataSet dataSet = DataSet.createFromFile(dataSetFile, inputsCount, outputsCount, ",", false);

    System.out.println("Creating neural network...");
    // create MultiLayerPerceptron neural network
    MultiLayerPerceptron neuralNet = new MultiLayerPerceptron(inputsCount, 22, outputsCount);

    // attach listener to learning rule
    MomentumBackpropagation learningRule = (MomentumBackpropagation) neuralNet.getLearningRule();
    learningRule.addListener(this);

    // set learning rate and max error
    learningRule.setLearningRate(0.2);
    learningRule.setMaxError(0.01);

    System.out.println("Training network...");
    // train the network with training set
    neuralNet.learn(dataSet);

    System.out.println("Training completed.");
    System.out.println("Testing network...");

    testNeuralNetwork(neuralNet, dataSet);

    System.out.println("Saving network");
    // save neural network to file
    neuralNet.save("MyNeuralConcreteStrenght.nnet");

    System.out.println("Done.");
}
Example #28
Source File: XorResilientPropagationSample.java From NeurophFramework with Apache License 2.0 | 5 votes |
/**
 * Runs this sample
 */
public void run() {
    // create training set (logical XOR function)
    DataSet trainingSet = new DataSet(2, 1);
    trainingSet.add(new DataSetRow(new double[]{0, 0}, new double[]{0}));
    trainingSet.add(new DataSetRow(new double[]{0, 1}, new double[]{1}));
    trainingSet.add(new DataSetRow(new double[]{1, 0}, new double[]{1}));
    trainingSet.add(new DataSetRow(new double[]{1, 1}, new double[]{0}));

    // create multi layer perceptron
    MultiLayerPerceptron myMlPerceptron = new MultiLayerPerceptron(TransferFunctionType.SIGMOID, 2, 3, 1);
    // set ResilientPropagation learning rule
    myMlPerceptron.setLearningRule(new ResilientPropagation());
    LearningRule learningRule = myMlPerceptron.getLearningRule();
    learningRule.addListener(this);

    // learn the training set
    System.out.println("Training neural network...");
    myMlPerceptron.learn(trainingSet);
    int iterations = ((SupervisedLearning) myMlPerceptron.getLearningRule()).getCurrentIteration();
    System.out.println("Learned in " + iterations + " iterations");

    // test perceptron
    System.out.println("Testing trained neural network");
    testNeuralNetwork(myMlPerceptron, trainingSet);
}
Example #29
Source File: TestBinaryClass.java From NeurophFramework with Apache License 2.0 | 5 votes |
public static void main(String[] args) {
    DataSet trainingSet = new DataSet(2, 1);
    trainingSet.add(new DataSetRow(new double[]{0, 0}, new double[]{0}));
    trainingSet.add(new DataSetRow(new double[]{0, 1}, new double[]{1}));
    trainingSet.add(new DataSetRow(new double[]{1, 0}, new double[]{1}));
    trainingSet.add(new DataSetRow(new double[]{1, 1}, new double[]{0}));

    MultiLayerPerceptron neuralNet = new MultiLayerPerceptron(TransferFunctionType.TANH, 2, 3, 1);
    neuralNet.learn(trainingSet);

    Evaluation.runFullEvaluation(neuralNet, trainingSet);
}
Example #30
Source File: BreastCancerSample.java From NeurophFramework with Apache License 2.0 | 5 votes |
public void run() {
    System.out.println("Creating training and test set from file...");
    String dataSetFile = "data_sets/breast_cancer.txt";
    int numInputs = 30;
    int numOutputs = 1;

    // Create data set from file
    DataSet dataSet = DataSet.createFromFile(dataSetFile, numInputs, numOutputs, ",");

    // Creating training set (70%) and test set (30%)
    DataSet[] trainTestSplit = dataSet.split(0.7, 0.3);
    DataSet trainingSet = trainTestSplit[0];
    DataSet testSet = trainTestSplit[1];

    // Normalizing data set
    Normalizer normalizer = new MaxNormalizer(trainingSet);
    normalizer.normalize(trainingSet);
    normalizer.normalize(testSet);

    // Create MultiLayerPerceptron neural network
    MultiLayerPerceptron neuralNet = new MultiLayerPerceptron(numInputs, 16, numOutputs);

    // attach listener to learning rule
    MomentumBackpropagation learningRule = (MomentumBackpropagation) neuralNet.getLearningRule();
    learningRule.addListener(this);
    learningRule.setLearningRate(0.3);
    learningRule.setMaxError(0.01);
    learningRule.setMaxIterations(500);

    System.out.println("Training network...");
    // train the network with training set
    neuralNet.learn(trainingSet);

    System.out.println("Testing network...");
    testNeuralNetwork(neuralNet, testSet);
}
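Several of the samples above end with neuralNet.save(...). Loading and reusing a saved network is symmetric. A minimal sketch; the file name matches one saved above, and the input values are placeholders that must be normalized the same way as the training data:

// Load a previously saved network and run it on one (already normalized) input row.
NeuralNetwork loadedNet = NeuralNetwork.createFromFile("MyNeuralNetBalanceScale.nnet");
double[] someNormalizedInput = new double[20]; // placeholder: 20 inputs, as in the balance scale sample
loadedNet.setInput(someNormalizedInput);
loadedNet.calculate();
System.out.println("Output: " + Arrays.toString(loadedNet.getOutput()));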