Package org.encog.neural.networks.training.propagation.back

Examples of org.encog.neural.networks.training.propagation.back.Backpropagation
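
The snippets below are drawn from projects that use Encog's Backpropagation trainer; the constructor consistently takes a network, a training set, a learning rate, and a momentum term. For orientation, here is a minimal self-contained sketch (the XOR data, network shape, and 0.01 stopping threshold are illustrative choices, not taken from any snippet below):

    import org.encog.ml.data.MLDataSet;
    import org.encog.ml.data.basic.BasicMLDataSet;
    import org.encog.neural.networks.BasicNetwork;
    import org.encog.neural.networks.training.propagation.back.Backpropagation;
    import org.encog.util.simple.EncogUtility;

    public class BackpropXorExample {
      public static void main(String[] args) {
        double[][] input = { {0, 0}, {0, 1}, {1, 0}, {1, 1} };
        double[][] ideal = { {0}, {1}, {1}, {0} };
        MLDataSet trainingSet = new BasicMLDataSet(input, ideal);

        // 2 inputs, one hidden layer of 4 neurons, 1 output, sigmoid activation
        BasicNetwork network = EncogUtility.simpleFeedForward(2, 4, 0, 1, false);

        // learning rate 0.7, momentum 0.3
        Backpropagation train = new Backpropagation(network, trainingSet, 0.7, 0.3);

        int epoch = 0;
        do {
          train.iteration();
          System.out.println("Epoch #" + epoch + " Error:" + train.getError());
          epoch++;
        } while (train.getError() > 0.01);
        train.finishTraining();
      }
    }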


    // train the neural network
    CalculateScore score = new TrainingSetScore(trainingSet);
    // simulated annealing fallback: start temp 10, stop temp 2, 100 cycles
    final MLTrain trainAlt = new NeuralSimulatedAnnealing(
        network, score, 10, 2, 100);

    final MLTrain trainMain = new Backpropagation(network, trainingSet, 0.000001, 0.0);

    ((Propagation) trainMain).setNumThreads(1); // force a single-threaded gradient pass
    final StopTrainingStrategy stop = new StopTrainingStrategy();
    trainMain.addStrategy(new Greedy());                 // keep only iterations that improve the error
    trainMain.addStrategy(new HybridStrategy(trainAlt)); // switch to annealing when backprop stalls
    trainMain.addStrategy(stop);                         // raise shouldStop() once improvement stagnates

    int epoch = 0;
    while (!stop.shouldStop()) {
      trainMain.iteration();
      System.out.println("Training " + what + ", Epoch #" + epoch
          + " Error:" + trainMain.getError());
      epoch++;
    }
    return trainMain.getError();
  }
View Full Code Here
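
With a learning rate of 0.000001, each backpropagation step barely moves the weights, so most of the search in this hybrid setup is done by the simulated-annealing fallback; raising the rate shifts the balance back toward plain gradient descent.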


    for(int i=0;i<TRIES;i++) {
   
      MLMethod method = EncogUtility.simpleFeedForward(INPUT_OUTPUT_COUNT,
          HIDDEN_COUNT, 0, INPUT_OUTPUT_COUNT, false);
     
      // backpropagation with a high learning rate (1.7) and no momentum
      Propagation train = new Backpropagation((ContainsFlat)method, trainingData, 1.7, 0);
      //Propagation train = new ResilientPropagation((ContainsFlat)method, trainingData);
      train.fixFlatSpot(true); // train is already a Propagation; no cast needed
     
      int iteration = 0;
      do {
        train.iteration();
       
        iteration++;
      } while( train.getError()>0.01 );
      count[i] = iteration;
      System.out.println("Begin Try #" + (i+1) + ", took " + iteration + " iterations.");     
    }
   
    System.out.println("Tries: " + TRIES);
View Full Code Here
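
The fixFlatSpot(true) call addresses the sigmoid flat-spot problem: Encog offsets the activation function's derivative by a small constant so the gradient does not vanish at the saturated ends of the sigmoid, which typically lowers the iteration counts this benchmark records. The commented-out line keeps ResilientPropagation handy as a comparison trainer.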

    network.reset();

    MLDataSet trainingSet = new BasicMLDataSet(input, output);

    // train the neural network
    MLTrain train = new Backpropagation(network, trainingSet, 0.7, 0.7);

    Stopwatch sw = new Stopwatch();
    sw.start();
    // run a fixed number of learning epochs
    for (int i = 0; i < ITERATIONS; i++) {
      train.iteration();
    }
    sw.stop();

    return sw.getElapsedMilliseconds();
  }
View Full Code Here
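
This helper benchmarks throughput rather than convergence: it runs a fixed ITERATIONS count of training epochs and returns the wall-clock milliseconds reported by Encog's Stopwatch; the final error is never inspected.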

  /**
   * {@inheritDoc}
   */
  @Override
  public final void createTrainer(final boolean singleThreaded) {
    final Propagation train = new Backpropagation(getNetwork(),
        getTraining(), getLearningRate(), getMomentum());

    if (singleThreaded) {
      train.setNumThreads(1);
    } else {
      train.setNumThreads(0);
    }

    for (final Strategy strategy : getStrategies()) {
      train.addStrategy(strategy);
    }

    setTrain(train);
  }
View Full Code Here
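
A thread count of 0 tells Encog's propagation trainers to size their worker pool from the number of available processors, while 1 forces a deterministic single-threaded run; the loop then carries any registered strategies over onto the fresh trainer before installing it with setTrain.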

    BasicNetwork network1 = NetworkUtil.createXORNetworkUntrained();
    BasicNetwork network2 = NetworkUtil.createXORNetworkUntrained();
    MLDataSet trainingData = new BasicMLDataSet(XOR.XOR_INPUT,XOR.XOR_IDEAL);
   
    // train network 1 straight through, no pause/resume
    Backpropagation bprop1 = new Backpropagation(network1, trainingData, 0.4, 0.4);
    bprop1.iteration();
    bprop1.iteration();
    bprop1.iteration();
    bprop1.iteration();

    // train network 2, pausing after two iterations and resuming on a fresh trainer
    Backpropagation bprop2 = new Backpropagation(network2, trainingData, 0.4, 0.4);
    bprop2.iteration();
    bprop2.iteration();
    TrainingContinuation state = bprop2.pause();
    bprop2 = new Backpropagation(network2, trainingData, 0.4, 0.4);
    bprop2.resume(state);
    bprop2.iteration();
    bprop2.iteration();

    // both networks should end up with the same error and weights
    double[] weights1 = NetworkCODEC.networkToArray(network1);
    double[] weights2 = NetworkCODEC.networkToArray(network2);

    Assert.assertEquals(bprop1.getError(), bprop2.getError(), 0.01);
    Assert.assertEquals(weights1.length, weights2.length);
    Assert.assertArrayEquals(weights1, weights2, 0.01);
   
  }
View Full Code Here
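
pause() captures the trainer's internal state in a TrainingContinuation, and resume() restores it on a brand-new Backpropagation instance. For backpropagation with momentum this state has to include the last weight deltas; that is why the test expects the interrupted-and-resumed run to match the uninterrupted one to within the 0.01 tolerance.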

           BasicNetwork network3 = NetworkUtil.createXORNetworknNguyenWidrowUntrained();

           MLTrain bpropNguyen = new Backpropagation(network3, trainingData3, 0.9, 0.8);
           train(i, bpropNguyen, "NguyenWidrowRandomizer");

           BasicNetwork network2 = NetworkUtil.createXORNetworkUntrained();

           MLTrain bpropRange = new Backpropagation(network2, trainingData2, 0.9, 0.8);
           train(i, bpropRange,  "RangeRandomizer       ");
       }
   }
View Full Code Here
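
The two runs differ only in how the weights are initialized: network3 starts from Nguyen-Widrow initialization, network2 from plain range randomization. Since both use the same Backpropagation settings (0.9 learning rate, 0.8 momentum), the per-epoch output from the train(...) helper makes the effect of initialization on convergence directly comparable.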

  {
    MLDataSet trainingData = new BasicMLDataSet(XOR.XOR_INPUT,XOR.XOR_IDEAL);
   
    BasicNetwork network = NetworkUtil.createXORNetworkUntrained();

    MLTrain bprop = new Backpropagation(network, trainingData, 0.7, 0.9);
    NetworkUtil.testTraining(bprop,0.01);
  }
View Full Code Here
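
A compact convergence test: an untrained XOR network is trained with a 0.7 learning rate and 0.9 momentum, and the pass/fail check is delegated to NetworkUtil.testTraining, a helper from Encog's test utilities that iterates the trainer and checks it against the supplied 0.01 threshold.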

     
      // when k-fold cross-validation is requested, wrap the data set first
      if( kFold>0 ) {
        trainingData = this.wrapTrainingData(trainingData);
      }

      MLTrain train = new Backpropagation((BasicNetwork) file.getObject(),
          trainingData, learningRate, momentum);

      // ...then wrap the trainer so each fold is trained in turn
      if( kFold>0 ) {
        train = this.wrapTrainer(trainingData, train, kFold);
      }
View Full Code Here
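
When kFold is positive, both the data set and the trainer are wrapped so training runs as k-fold cross-validation, with each fold held out in turn while the rest drive the gradient updates. Note that wrapTrainingData and wrapTrainer are helpers on the enclosing class, not part of the Backpropagation API itself.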

    // default to a 0.7 learning rate and 0.3 momentum when the
    // properties are absent (the 'false' flag marks them optional)
    final double learningRate = holder.getDouble(
        MLTrainFactory.PROPERTY_LEARNING_RATE, false, 0.7);
    final double momentum = holder.getDouble(
        MLTrainFactory.PROPERTY_LEARNING_MOMENTUM, false, 0.3);

    return new Backpropagation((BasicNetwork) method, training,
        learningRate, momentum);
  }
View Full Code Here
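
This factory method is how Encog builds the same trainer from a configuration string. A hedged usage sketch (assuming MLTrainFactory.TYPE_BACKPROP and the LR/MOM keys that PROPERTY_LEARNING_RATE and PROPERTY_LEARNING_MOMENTUM map to; network and trainingSet as in the earlier snippets):

    MLTrain train = new MLTrainFactory().create(
        network, trainingSet, MLTrainFactory.TYPE_BACKPROP, "LR=0.7,MOM=0.3");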
