Java Code Examples for org.neuroph.core.NeuralNetwork#setLearningRule()
The following examples show how to use
org.neuroph.core.NeuralNetwork#setLearningRule() .
You can vote up the ones you like or vote down the ones you don't like,
and go to the original project or source file by following the links above each example. You may check out the related API usage on the sidebar.
Example 1
Source File: BackPropagationTest.java From NeurophFramework with Apache License 2.0 | 6 votes |
@Test
public void testCalculateErrorAndUpdateOutputNeurons() {
    // Minimal topology: two input neurons feeding a single output neuron.
    NeuralNetwork<BackPropagation> network = new NeuralNetwork<>();

    List<Neuron> inputNeurons = new ArrayList<Neuron>();
    inputNeurons.add(new Neuron());
    inputNeurons.add(new Neuron());
    network.setInputNeurons(inputNeurons);

    List<Neuron> outputNeurons = new ArrayList<Neuron>();
    outputNeurons.add(new Neuron());
    network.setOutputNeurons(outputNeurons);

    network.setLearningRule(instance);

    // Pre-set a non-zero delta so we can observe it being recomputed.
    network.getOutputNeurons().get(0).setDelta(1);

    // Desired output 0 matches the neuron's actual output -> delta resets to 0.
    instance.calculateErrorAndUpdateOutputNeurons(new double[]{0});
    assertTrue(network.getOutputNeurons().get(0).getDelta() == 0);

    // Desired output 0.5 -> resulting delta is 0.5.
    instance.calculateErrorAndUpdateOutputNeurons(new double[]{0.5});
    assertTrue(network.getOutputNeurons().get(0).getDelta() == 0.5);
}
Example 2
Source File: BackPropagationTest.java From NeurophFramework with Apache License 2.0 | 6 votes |
@Test
public void testUpdateNetworkWeights() {
    // Minimal topology: two input neurons and one output neuron.
    NeuralNetwork<BackPropagation> nn = new NeuralNetwork<>();
    nn.setInputNeurons(new ArrayList<Neuron>() {
        {
            add(new Neuron());
            add(new Neuron());
        }
    });
    nn.setOutputNeurons(new ArrayList<Neuron>() {
        {
            add(new Neuron());
        }
    });

    // Spy on a fresh rule and attach it to the network so its internal
    // delegation can be verified. (The original test first attached
    // `instance` and immediately replaced it with the spy — that first
    // assignment was dead code and has been removed.)
    BackPropagation bp1 = Mockito.spy(new BackPropagation());
    nn.setLearningRule(bp1);

    double[] weights = {1, 2};
    bp1.calculateWeightChanges(weights);

    // calculateWeightChanges must delegate to both error-propagation steps.
    Mockito.verify(bp1).calculateErrorAndUpdateOutputNeurons(weights);
    Mockito.verify(bp1).calculateErrorAndUpdateHiddenNeurons();
}
Example 3
Source File: BackPropagationTest.java From NeurophFramework with Apache License 2.0 | 5 votes |
@Test
public void testCalculateErrorAndUpdateHiddenNeurons() {
    // Minimal network wiring so the learning rule has a host network.
    NeuralNetwork<BackPropagation> network = new NeuralNetwork<>();

    List<Neuron> inputNeurons = new ArrayList<Neuron>();
    inputNeurons.add(new Neuron());
    inputNeurons.add(new Neuron());
    network.setInputNeurons(inputNeurons);

    List<Neuron> outputNeurons = new ArrayList<Neuron>();
    outputNeurons.add(new Neuron());
    network.setOutputNeurons(outputNeurons);

    network.setLearningRule(instance);

    Layer firstLayer = new Layer();
    Layer middleLayer = new Layer();
    Layer lastLayer = new Layer();

    // Downstream neuron with a known delta, connected to a hidden neuron
    // with a linear transfer function and weight 2.
    Neuron downstream = new Neuron();
    downstream.setDelta(0.5);

    Neuron hidden = new Neuron();
    hidden.setTransferFunction(new Linear());

    double weight = 2;
    downstream.addInputConnection(new Connection(hidden, downstream, weight));

    // Delta starts out at zero before error propagation runs.
    assertTrue(0 == hidden.getDelta());

    network.addLayer(firstLayer);
    network.addLayer(middleLayer);
    network.addLayer(lastLayer);
    middleLayer.addNeuron(hidden);

    instance.calculateErrorAndUpdateHiddenNeurons();

    // The stored delta must match the rule's own hidden-error computation.
    assertTrue(instance.calculateHiddenNeuronError(hidden) == hidden.getDelta());
}
Example 4
Source File: ImageRecognitionHelper.java From NeurophFramework with Apache License 2.0 | 5 votes |
/** * Creates and returns new neural network for image recognition. * Assumes that all of the FractionRgbData objects in the given map have identical * length arrays in them so that the input layer of the neural network can be * created here. * * @param label neural network label * @param samplingResolution sampling resolution (image size) * @param imageLabels image labels * @param layersNeuronsCount neuron counts in hidden layers * @param transferFunctionType type of transfer function to use for neurons in network * @param colorMode color mode * @return */ public static NeuralNetwork createNewNeuralNetwork(String label, Dimension samplingResolution, ColorMode colorMode, List<String> imageLabels, List<Integer> layersNeuronsCount, TransferFunctionType transferFunctionType) { int numberOfInputNeurons; if ((colorMode == ColorMode.COLOR_RGB) || (colorMode == ColorMode.COLOR_HSL) ){ // for full color rgb or hsl numberOfInputNeurons = 3 * samplingResolution.getWidth() * samplingResolution.getHeight(); } else { // for black n white network numberOfInputNeurons = samplingResolution.getWidth() * samplingResolution.getHeight(); } int numberOfOuputNeurons = imageLabels.size(); layersNeuronsCount.add(0, numberOfInputNeurons); layersNeuronsCount.add(numberOfOuputNeurons); System.out.println("Neuron layer size counts vector = " + layersNeuronsCount); NeuralNetwork neuralNetwork = new MultiLayerPerceptron(layersNeuronsCount, transferFunctionType); neuralNetwork.setLabel(label); PluginBase imageRecognitionPlugin = new ImageRecognitionPlugin(samplingResolution, colorMode); neuralNetwork.addPlugin(imageRecognitionPlugin); assignLabelsToOutputNeurons(neuralNetwork, imageLabels); neuralNetwork.setLearningRule(new MomentumBackpropagation()); return neuralNetwork; }
Example 5
Source File: Model.java From o2oa with GNU Affero General Public License v3.0 | 5 votes |
/**
 * Builds a fully-connected multilayer perceptron: a linear input layer, the
 * configured number of sigmoid hidden layers (each with a bias neuron), and a
 * sigmoid output layer. The learning rule is momentum backpropagation
 * configured from this model's property map, and weights are randomized.
 */
public NeuralNetwork<MomentumBackpropagation> createNeuralNetwork(Integer inValueCount, Integer outValueCount,
        Integer hiddenLayerCount) {
    // Neuron prototypes for each kind of layer.
    NeuronProperties inputProps = new NeuronProperties(InputNeuron.class, Linear.class);
    NeuronProperties hiddenProps = new NeuronProperties(InputOutputNeuron.class, WeightedSum.class, Sigmoid.class);
    NeuronProperties outputProps = new NeuronProperties(InputOutputNeuron.class, WeightedSum.class, Sigmoid.class);

    NeuralNetwork<MomentumBackpropagation> network = new NeuralNetwork<>();
    network.setNetworkType(NeuralNetworkType.MULTI_LAYER_PERCEPTRON);

    // Input layer plus a bias neuron.
    Layer inputLayer = LayerFactory.createLayer(inValueCount, inputProps);
    inputLayer.addNeuron(new BiasNeuron());
    network.addLayer(inputLayer);

    // Hidden layers — sizes come from the model's heuristic; each gets a bias neuron.
    for (Integer neuronCount : this.hiddenNeurons(inValueCount, outValueCount, hiddenLayerCount)) {
        Layer hiddenLayer = LayerFactory.createLayer(neuronCount, hiddenProps);
        hiddenLayer.addNeuron(new BiasNeuron());
        network.addLayer(hiddenLayer);
    }

    // Output layer (no bias neuron).
    Layer outputLayer = LayerFactory.createLayer(outValueCount, outputProps);
    network.addLayer(outputLayer);

    // Fully connect every adjacent pair of layers.
    for (int i = 0; i < (network.getLayersCount() - 1); i++) {
        ConnectionFactory.fullConnect(network.getLayers().get(i), network.getLayers().get(i + 1));
    }

    // Learning rule parameters are read from the property map with defaults.
    network.setLearningRule(this.createMomentumBackpropagation(
            MapTools.getDouble(this.getPropertyMap(), PROPERTY_MLP_MAXERROR, DEFAULT_MLP_MAXERROR),
            MapTools.getInteger(this.getPropertyMap(), PROPERTY_MLP_MAXITERATION, DEFAULT_MLP_MAXITERATION),
            MapTools.getDouble(this.getPropertyMap(), PROPERTY_MLP_LEARNINGRATE, DEFAULT_MLP_LEARNINGRATE),
            MapTools.getDouble(this.getPropertyMap(), PROPERTY_MLP_MOMENTUM, DEFAULT_MLP_MOMENTUM)));

    NeuralNetworkFactory.setDefaultIO(network);
    network.randomizeWeights();
    return network;
}