Package: org.encog.neural.networks.training.propagation.back

Examples of org.encog.neural.networks.training.propagation.back.Backpropagation


    // Read optional trainer hyper-parameters from the architecture holder,
    // falling back to the defaults (learning rate 0.7, momentum 0.3) when a
    // property is absent. The `false` flag presumably marks the property as
    // optional rather than required — TODO confirm against ParamsHolder.getDouble.
    final double learningRate = holder.getDouble(
        MLTrainFactory.PROPERTY_LEARNING_RATE, false, 0.7);
    final double momentum = holder.getDouble(
        MLTrainFactory.PROPERTY_LEARNING_MOMENTUM, false, 0.3);

    // Build the backpropagation trainer for the supplied network and data set.
    return new Backpropagation((BasicNetwork) method, training,
        learningRate, momentum);
  }
View Full Code Here


  {
    // Standard XOR truth-table dataset used throughout the Encog tests.
    MLDataSet trainingData = new BasicMLDataSet(XOR.XOR_INPUT,XOR.XOR_IDEAL);
   
    // Fresh, untrained network so the test exercises actual learning.
    BasicNetwork network = NetworkUtil.createXORNetworkUntrained();

    // Backpropagation with learning rate 0.7 and momentum 0.9; testTraining
    // presumably iterates until the error falls below 0.01 — TODO confirm
    // against NetworkUtil.testTraining.
    MLTrain bprop = new Backpropagation(network, trainingData, 0.7, 0.9);
    NetworkUtil.testTraining(trainingData,bprop,0.01);
  }
View Full Code Here

    // Two identical untrained XOR networks: network1 is trained for four
    // uninterrupted iterations; network2 is trained for two, paused, resumed
    // on a freshly constructed trainer, then trained for two more. If
    // pause/resume is transparent, both networks end up in the same state.
    // NOTE(review): the locals are named "rprop" but the trainer here is
    // Backpropagation, not resilient propagation — likely copied from an
    // RPROP test; consider renaming.
    BasicNetwork network1 = NetworkUtil.createXORNetworkUntrained();
    BasicNetwork network2 = NetworkUtil.createXORNetworkUntrained();
    MLDataSet trainingData = new BasicMLDataSet(XOR.XOR_INPUT,XOR.XOR_IDEAL);
   
    // train network 1, no continue
    Backpropagation rprop1 = new Backpropagation(network1,trainingData,0.4,0.4);
    rprop1.iteration();
    rprop1.iteration();
    rprop1.iteration();
    rprop1.iteration();
   
    // train network 2, continue
    Backpropagation rprop2 = new Backpropagation(network2,trainingData,0.4,0.4);
    rprop2.iteration();
    rprop2.iteration();
    // Capture trainer state, rebuild the trainer with the same
    // hyper-parameters, restore the saved state, then keep training.
    TrainingContinuation state = rprop2.pause();
    rprop2 = new Backpropagation(network2,trainingData,0.4,0.4);
    rprop2.resume(state);
    rprop2.iteration();
    rprop2.iteration();
   
    // verify weights are the same
    double[] weights1 = NetworkCODEC.networkToArray(network1);
    double[] weights2 = NetworkCODEC.networkToArray(network2);
   
    // Final error and every weight must agree with the uninterrupted run
    // to within a 0.01 tolerance.
    Assert.assertEquals(rprop1.getError(), rprop2.getError(), 0.01);
    Assert.assertEquals(weights1.length, weights2.length);
    Assert.assertArrayEquals(weights1, weights2, 0.01);
   
  }
View Full Code Here

  /**
   * {@inheritDoc}
   */
  @Override
  public void createTrainer(final boolean singleThreaded) {
    final Propagation train = new Backpropagation(getNetwork(),
        getTraining(), getLearningRate(), getMomentum());

    if (singleThreaded) {
      train.setThreadCount(1);
    } else {
      train.setThreadCount(0);
    }

    for (final Strategy strategy : getStrategies()) {
      train.addStrategy(strategy);
    }

    setTrain(train);
  }
View Full Code Here

public class BackpropagationFactory implements EnsembleTrainFactory {

  @Override
  public MLTrain getTraining(MLMethod mlMethod, MLDataSet trainingData) {
    return (MLTrain) new Backpropagation((BasicNetwork) mlMethod, trainingData);
  }
View Full Code Here

TOP

Related Classes of org.encog.neural.networks.training.propagation.back.Backpropagation

Copyright © 2018 www.massapi.com. All rights reserved.
All source code is the property of its respective owners. Java is a trademark of Sun Microsystems, Inc. and is owned by Oracle Inc. Contact coftware#gmail.com.