Package org.neuroph.nnet

Examples of org.neuroph.nnet.MultiLayerPerceptron


        // XOR training set: two inputs, one output
        TrainingSet trainingSet = new TrainingSet();
        trainingSet.addElement(new SupervisedTrainingElement(new double[]{0, 0}, new double[]{0}));
        trainingSet.addElement(new SupervisedTrainingElement(new double[]{0, 1}, new double[]{1}));
        trainingSet.addElement(new SupervisedTrainingElement(new double[]{1, 0}, new double[]{1}));
        trainingSet.addElement(new SupervisedTrainingElement(new double[]{1, 1}, new double[]{0}));

        // create a 2-3-1 multi layer perceptron with tanh neurons
        MultiLayerPerceptron nnet = new MultiLayerPerceptron(TransferFunctionType.TANH, 2, 3, 1);
        // wrap it in the matrix-based implementation
        MatrixMultiLayerPerceptron mnet = new MatrixMultiLayerPerceptron(nnet);

        System.out.println("Training network...");

        mnet.learnInSameThread(trainingSet);
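
Once training finishes, the wrapped network can be queried. A minimal sketch, assuming the snippet above has run and the usual Neuroph imports (TrainingElement, Arrays) are in scope:

        // print the trained network's output for each XOR pattern
        for (TrainingElement element : trainingSet.trainingElements()) {
            mnet.setInput(element.getInput());
            mnet.calculate();
            System.out.println(Arrays.toString(element.getInput()) + " -> " + Arrays.toString(mnet.getOutput()));
        }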


  public void run() {
   
    // use regular (non-flat) Neuroph processing; comment this line out to keep flat network processing
    Neuroph.getInstance().setFlattenNetworks(false);
   
    NeuralNetwork network = new MultiLayerPerceptron(TransferFunctionType.SIGMOID, WINDOW_SIZE, 10, 1);
     
    normalizeSunspots(0.1, 0.9);
   
    network.getLearningRule().addObserver(this);
   
    TrainingSet training = generateTraining();
    network.learnInSameThread(training);
    predict(network);
   
    Neuroph.getInstance().shutdown();
  }
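
The normalizeSunspots and generateTraining helpers are not shown in this fragment. A plausible sketch of generateTraining, assuming a normalized double[] sunspots field: it slides a WINDOW_SIZE-wide window over the series and uses the value right after the window as the training target.

  private TrainingSet generateTraining() {
    TrainingSet trainingSet = new TrainingSet();
    // one supervised element per window position
    for (int i = 0; i + WINDOW_SIZE < sunspots.length; i++) {
      double[] input = new double[WINDOW_SIZE];
      System.arraycopy(sunspots, i, input, 0, WINDOW_SIZE);
      // the target is the value immediately following the window
      double[] output = { sunspots[i + WINDOW_SIZE] };
      trainingSet.addElement(new SupervisedTrainingElement(input, output));
    }
    return trainingSet;
  }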

        // XOR training set
        TrainingSet trainingSet = new TrainingSet();
        trainingSet.addElement(new SupervisedTrainingElement(new double[]{0, 0}, new double[]{0}));
        trainingSet.addElement(new SupervisedTrainingElement(new double[]{0, 1}, new double[]{1}));
        trainingSet.addElement(new SupervisedTrainingElement(new double[]{1, 0}, new double[]{1}));
        trainingSet.addElement(new SupervisedTrainingElement(new double[]{1, 1}, new double[]{0}));

        // create multi layer perceptron
        MultiLayerPerceptron myMlPerceptron = new MultiLayerPerceptron(TransferFunctionType.TANH, 2, 3, 1);

        // enable batch if using MomentumBackpropagation
        if( myMlPerceptron.getLearningRule() instanceof MomentumBackpropagation )
          ((MomentumBackpropagation)myMlPerceptron.getLearningRule()).setBatchMode(true);

        // learn the training set
        System.out.println("Training neural network...");
        myMlPerceptron.learnInSameThread(trainingSet);

        // test perceptron
        System.out.println("Testing trained neural network");
        testNeuralNetwork(myMlPerceptron, trainingSet);

        // save trained neural network
        myMlPerceptron.save("myMlPerceptron.nnet");

        // load saved neural network
        NeuralNetwork loadedMlPerceptron = NeuralNetwork.load("myMlPerceptron.nnet");

        // test loaded neural network
        testNeuralNetwork(loadedMlPerceptron, trainingSet);
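
The testNeuralNetwork helper is not included in this fragment; a minimal sketch of such a helper, using the same setInput/calculate/getOutput pattern as the stock-prediction example below:

        public static void testNeuralNetwork(NeuralNetwork nnet, TrainingSet trainingSet) {
            for (TrainingElement element : trainingSet.trainingElements()) {
                nnet.setInput(element.getInput());
                nnet.calculate();
                System.out.println("Input: " + Arrays.toString(element.getInput()) + " Output: " + Arrays.toString(nnet.getOutput()));
            }
        }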

  /**
   * Creates an instance of Multi Layer Perceptron from a string of layer sizes
   * @param layersStr layer sizes as a string, e.g. "2 3 1"
   * @param transferFunctionType transfer function type for neurons
   * @return instance of Multi Layer Perceptron
   */
  public static MultiLayerPerceptron createMLPerceptron(String layersStr, TransferFunctionType transferFunctionType) {
    ArrayList<Integer> layerSizes = VectorParser.parseInteger(layersStr);
    MultiLayerPerceptron nnet = new MultiLayerPerceptron(layerSizes,
        transferFunctionType);
    return nnet;
  }
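
A usage sketch for this factory, assuming it lives on Neuroph's NeuralNetworkFactory and that the layer string holds whitespace-separated sizes:

  // hypothetical usage: a 4-8-1 network with tanh neurons
  MultiLayerPerceptron mlp = NeuralNetworkFactory.createMLPerceptron("4 8 1", TransferFunctionType.TANH);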

  /**
   * Creates an instance of Multi Layer Perceptron with the given learning rule,
   * bias and input-to-output connection settings
   * @param layersStr layer sizes as a string, e.g. "2 3 1"
   * @param transferFunctionType transfer function type for neurons
   * @param learningRule class of the learning rule to set on the network
   * @param useBias whether neurons should use a bias input
   * @param connectIO whether to connect input neurons directly to output neurons
   * @return instance of Multi Layer Perceptron
   */
  public static MultiLayerPerceptron createMLPerceptron(String layersStr, TransferFunctionType transferFunctionType, Class learningRule, boolean useBias, boolean connectIO) {
    ArrayList<Integer> layerSizes = VectorParser.parseInteger(layersStr);
    NeuronProperties neuronProperties = new NeuronProperties(transferFunctionType, useBias);
    MultiLayerPerceptron nnet = new MultiLayerPerceptron(layerSizes, neuronProperties);

    // set learning rule
    if (learningRule.getName().equals(BackPropagation.class.getName())) {
      nnet.setLearningRule(new BackPropagation());
    } else if (learningRule.getName().equals(MomentumBackpropagation.class.getName())) {
      nnet.setLearningRule(new MomentumBackpropagation());
    } else if (learningRule.getName().equals(DynamicBackPropagation.class.getName())) {
      nnet.setLearningRule(new DynamicBackPropagation());
    }

    // connect io
    if (connectIO) {
      nnet.connectInputsToOutputs();
    }

    return nnet;
  }
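
A usage sketch for this overload, under the same assumptions:

  // hypothetical usage: momentum backpropagation, biased neurons, no direct input-output links
  MultiLayerPerceptron mlp = NeuralNetworkFactory.createMLPerceptron("4 8 1", TransferFunctionType.SIGMOID, MomentumBackpropagation.class, true, false);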

    public static void main(String[] args) {
        System.out.println("Time stamp N1:" + new SimpleDateFormat("dd-MMM-yyyy HH:mm:ss:MM").format(new Date()));

        int maxIterations = 10000;
        // 4 inputs (sliding window), 9 hidden neurons, 1 output
        NeuralNetwork neuralNet = new MultiLayerPerceptron(4, 9, 1);
        // the default backpropagation rule is a subclass of LMS
        ((LMS) neuralNet.getLearningRule()).setMaxError(0.001);   // acceptable total error, in [0, 1]
        ((LMS) neuralNet.getLearningRule()).setLearningRate(0.7); // learning rate, in [0, 1]
        ((LMS) neuralNet.getLearningRule()).setMaxIterations(maxIterations); // stop after this many iterations
        TrainingSet trainingSet = new TrainingSet();

        // normalize DAX closes into [0, 1] by dividing by a chosen maximum
        double daxmax = 10000.0D;
        trainingSet.addElement(new SupervisedTrainingElement(new double[]{3710.0D / daxmax, 3690.0D / daxmax, 3890.0D / daxmax, 3695.0D / daxmax}, new double[]{3666.0D / daxmax}));
        trainingSet.addElement(new SupervisedTrainingElement(new double[]{3690.0D / daxmax, 3890.0D / daxmax, 3695.0D / daxmax, 3666.0D / daxmax}, new double[]{3692.0D / daxmax}));
        trainingSet.addElement(new SupervisedTrainingElement(new double[]{3890.0D / daxmax, 3695.0D / daxmax, 3666.0D / daxmax, 3692.0D / daxmax}, new double[]{3886.0D / daxmax}));
        trainingSet.addElement(new SupervisedTrainingElement(new double[]{3695.0D / daxmax, 3666.0D / daxmax, 3692.0D / daxmax, 3886.0D / daxmax}, new double[]{3914.0D / daxmax}));
        trainingSet.addElement(new SupervisedTrainingElement(new double[]{3666.0D / daxmax, 3692.0D / daxmax, 3886.0D / daxmax, 3914.0D / daxmax}, new double[]{3956.0D / daxmax}));
        trainingSet.addElement(new SupervisedTrainingElement(new double[]{3692.0D / daxmax, 3886.0D / daxmax, 3914.0D / daxmax, 3956.0D / daxmax}, new double[]{3953.0D / daxmax}));
        trainingSet.addElement(new SupervisedTrainingElement(new double[]{3886.0D / daxmax, 3914.0D / daxmax, 3956.0D / daxmax, 3953.0D / daxmax}, new double[]{4044.0D / daxmax}));
        trainingSet.addElement(new SupervisedTrainingElement(new double[]{3914.0D / daxmax, 3956.0D / daxmax, 3953.0D / daxmax, 4044.0D / daxmax}, new double[]{3987.0D / daxmax}));
        trainingSet.addElement(new SupervisedTrainingElement(new double[]{3956.0D / daxmax, 3953.0D / daxmax, 4044.0D / daxmax, 3987.0D / daxmax}, new double[]{3996.0D / daxmax}));
        trainingSet.addElement(new SupervisedTrainingElement(new double[]{3953.0D / daxmax, 4044.0D / daxmax, 3987.0D / daxmax, 3996.0D / daxmax}, new double[]{4043.0D / daxmax}));
        trainingSet.addElement(new SupervisedTrainingElement(new double[]{4044.0D / daxmax, 3987.0D / daxmax, 3996.0D / daxmax, 4043.0D / daxmax}, new double[]{4068.0D / daxmax}));
        trainingSet.addElement(new SupervisedTrainingElement(new double[]{3987.0D / daxmax, 3996.0D / daxmax, 4043.0D / daxmax, 4068.0D / daxmax}, new double[]{4176.0D / daxmax}));
        trainingSet.addElement(new SupervisedTrainingElement(new double[]{3996.0D / daxmax, 4043.0D / daxmax, 4068.0D / daxmax, 4176.0D / daxmax}, new double[]{4187.0D / daxmax}));
        trainingSet.addElement(new SupervisedTrainingElement(new double[]{4043.0D / daxmax, 4068.0D / daxmax, 4176.0D / daxmax, 4187.0D / daxmax}, new double[]{4223.0D / daxmax}));
        trainingSet.addElement(new SupervisedTrainingElement(new double[]{4068.0D / daxmax, 4176.0D / daxmax, 4187.0D / daxmax, 4223.0D / daxmax}, new double[]{4259.0D / daxmax}));
        trainingSet.addElement(new SupervisedTrainingElement(new double[]{4176.0D / daxmax, 4187.0D / daxmax, 4223.0D / daxmax, 4259.0D / daxmax}, new double[]{4203.0D / daxmax}));
        trainingSet.addElement(new SupervisedTrainingElement(new double[]{4187.0D / daxmax, 4223.0D / daxmax, 4259.0D / daxmax, 4203.0D / daxmax}, new double[]{3989.0D / daxmax}));
        neuralNet.learnInSameThread(trainingSet);
        System.out.println("Time stamp N2:" + new SimpleDateFormat("dd-MMM-yyyy HH:mm:ss:MM").format(new Date()));

        TrainingSet testSet = new TrainingSet();
        testSet.addElement(new TrainingElement(new double[]{4223.0D / daxmax, 4259.0D / daxmax, 4203.0D / daxmax, 3989.0D / daxmax}));

        for (TrainingElement testElement : testSet.trainingElements()) {
            neuralNet.setInput(testElement.getInput());
            neuralNet.calculate();
            double[] networkOutput = neuralNet.getOutput();
            System.out.print("Input: " + Arrays.toString(testElement.getInput()) );
            System.out.println(" Output: " + Arrays.toString(networkOutput) );
        }
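
The outputs stay on the normalized 0-1 scale; multiplying by the normalization constant recovers the predicted index level, e.g. inside the loop above:

            double predictedDax = networkOutput[0] * daxmax; // back to DAX points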

        //Experiments:

    // prepend the input layer size and append the output layer size
    layersNeuronsCount.add(0, numberOfInputNeurons);
    layersNeuronsCount.add(numberOfOuputNeurons);
   
    System.out.println("Neuron layer size counts vector = " + layersNeuronsCount);
   
    NeuralNetwork neuralNetwork = new MultiLayerPerceptron(layersNeuronsCount, transferFunctionType);

    neuralNetwork.setLabel(label);
    PluginBase imageRecognitionPlugin = new ImageRecognitionPlugin(samplingResolution, colorMode);
    neuralNetwork.addPlugin(imageRecognitionPlugin);

    assignLabelsToOutputNeurons(neuralNetwork, imageLabels);
    neuralNetwork.setLearningRule(new MomentumBackpropagation());

    return neuralNetwork;
  }
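
A sketch of querying the resulting network through its plugin, following Neuroph's image recognition API (the image file here is hypothetical):

    // hypothetical usage of the plugin attached above
    ImageRecognitionPlugin plugin = (ImageRecognitionPlugin) neuralNetwork.getPlugin(ImageRecognitionPlugin.class);
    HashMap<String, Double> recognized = plugin.recognizeImage(new File("someImage.png"));
    System.out.println(recognized);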
