Package org.encog.ml.train

Examples of org.encog.ml.train.MLTrain


     
      // kFold > 0 enables cross-validation: wrap the raw data first so it
      // can later be split into folds by the wrapped trainer.
      if( kFold>0 ) {
        trainingData = this.wrapTrainingData(trainingData);
      }
     
      // Scaled Conjugate Gradient trainer for the network obtained from
      // 'file' (presumably a persisted Encog resource — confirm in the
      // enclosing method, which is not visible here).
      MLTrain train = new ScaledConjugateGradient(
          (BasicNetwork) file.getObject(), trainingData);
     
      // When cross-validation was requested, wrap the base trainer in the
      // k-fold variant so each iteration rotates through the folds.
      if( kFold>0 ) {
        train = this.wrapTrainer(trainingData,train,kFold);
      }
View Full Code Here


     
      // kFold > 0 enables cross-validation: wrap the raw data first so it
      // can later be split into folds by the wrapped trainer.
      if( kFold>0 ) {
        trainingData = this.wrapTrainingData(trainingData);
      }

      // QuickPropagation trainer; 'learningRate' comes from the enclosing
      // scope (not visible in this fragment).
      MLTrain train = new QuickPropagation((BasicNetwork) file.getObject(),
          trainingData, learningRate);
     
      // When cross-validation was requested, wrap the base trainer in the
      // k-fold variant so each iteration rotates through the folds.
      if( kFold>0 ) {
        train = this.wrapTrainer(trainingData,train,kFold);
      }
View Full Code Here

    // Ending temperature for the annealing schedule; 2 is the default when
    // the script property is absent (the boolean argument presumably marks
    // the property as optional — confirm against ParamsHolder).
    final double stopTemp = holder.getDouble(
        MLTrainFactory.PROPERTY_TEMPERATURE_STOP, false, 2);

    // Number of annealing cycles per training iteration; defaults to 100.
    final int cycles = holder.getInt(MLTrainFactory.CYCLES, false, 100);

    // Build the simulated-annealing trainer from the parsed parameters.
    // 'method', 'score' and 'startTemp' are defined earlier in the enclosing
    // method, outside this fragment.
    final MLTrain train = new NeuralSimulatedAnnealing(
        (BasicNetwork) method, score, startTemp, stopTemp, cycles);

    return train;
  }
View Full Code Here

    // Training set built from the XOR truth table held in CustomActivation.
    final MLDataSet trainingSet = new BasicMLDataSet(
        CustomActivation.XOR_INPUT, CustomActivation.XOR_IDEAL);

   
    // train the neural network with resilient propagation (RPROP)
    final MLTrain train = new ResilientPropagation(network, trainingSet);
    // reset if improve is less than 1% over 5 cycles
    train.addStrategy(new RequiredImprovementStrategy(5));

    // Iterate until the training error drops below 1%.
    EncogUtility.trainToError(train, 0.01);
   
    // Print the trained network's output for each training pair.
    EncogUtility.evaluate(network, trainingSet);
View Full Code Here

    // Mutation probability for the genetic algorithm; defaults to 0.1 when
    // the script property is absent.
    final double mutation = holder.getDouble(
        MLTrainFactory.PROPERTY_MUTATION, false, 0.1);
    // Mating (crossover) percentage; defaults to 0.25.
    final double mate = holder.getDouble(MLTrainFactory.PROPERTY_MATE,
        false, 0.25);

    // Build the genetic-algorithm trainer. Initial population weights are
    // randomized uniformly in [-1, 1]. 'method', 'score' and
    // 'populationSize' come from earlier in the enclosing method, outside
    // this fragment.
    final MLTrain train = new NeuralGeneticAlgorithm((BasicNetwork) method,
        new RangeRandomizer(-1, 1), score, populationSize, mutation,
        mate);

    return train;
  }
View Full Code Here

      final MLDataSet training,
      final String type, final String args) {
   
    // Ask each registered Encog plugin, in registration order, to create a
    // trainer for the requested type; the first non-null result wins.
    for (EncogPluginBase plugin : Encog.getInstance().getPlugins()) {
      // Only service plugins know how to construct trainers.
      if (plugin instanceof EncogPluginService1) {
        MLTrain result = ((EncogPluginService1) plugin).createTraining(
            method, training, type, args);
        if (result != null) {
          return result;
        }
      }
View Full Code Here

    // Noisy XOR dataset (10 samples per case) so training is non-trivial.
    MLDataSet trainingData = XOR.createNoisyXORDataSet(10);
   
    BasicNetwork network = NetworkUtil.createXORNetworkUntrained();
   
    // Cross-validation requires the data to be wrapped in a FoldedDataSet,
    // which CrossValidationKFold then partitions into 4 folds.
    final FoldedDataSet folded = new FoldedDataSet(trainingData);
    final MLTrain train = new ResilientPropagation(network, folded);
    final CrossValidationKFold trainFolded = new CrossValidationKFold(train,4);
   
    // Train until the (cross-validated) error drops below 20%.
    EncogUtility.trainToError(trainFolded, 0.2);
   
    // Verify the resulting model reproduces XOR within the 0.2 tolerance.
    XOR.verifyXOR((MLRegression)trainFolded.getMethod(), 0.2);
View Full Code Here

    // Register resettable methods with the analyst (presumably so a reset
    // strategy can act on them during training — confirm against
    // EncogAnalyst.setMethod).
    if( method instanceof MLResettable ) {
      this.getAnalyst().setMethod(method);
    }


    // Delegate trainer construction to the factory using the script's
    // training type and argument string.
    MLTrain train = factory.create(method, trainingSet, type, args);

    // Wrap in k-fold cross-validation when the script requested it.
    if (this.kfold > 0) {
      train = new CrossValidationKFold(train, this.kfold);
    }
View Full Code Here

  public final boolean executeCommand(final String args) {

    this.kfold = obtainCross();
    final MLDataSet trainingSet = obtainTrainingSet();
    MLMethod method = obtainMethod();
    final MLTrain trainer = createTrainer(method, trainingSet);
   
    EncogLogging.log(EncogLogging.LEVEL_DEBUG, "Beginning training");

    performTraining(trainer, method, trainingSet);

    final String resourceID = getProp().getPropertyString(
        ScriptProperties.ML_CONFIG_MACHINE_LEARNING_FILE);
    final File resourceFile = getAnalyst().getScript().resolveFilename(
        resourceID);
    method = trainer.getMethod();
    EncogDirectoryPersistence.saveObject(resourceFile, method);
    EncogLogging.log(EncogLogging.LEVEL_DEBUG, "save to:" + resourceID);

    return getAnalyst().shouldStopCommand();
  }
View Full Code Here

    // Re-initialize weights, then overwrite them with a deterministic
    // randomization so the example produces repeatable results.
    network.reset();
    new ConsistentRandomizer(-1,1).randomize(network);

    // create training data
    MLDataSet trainingSet = new BasicMLDataSet(XOR_INPUT, XOR_IDEAL);
    final MLTrain train = new ResilientPropagation(network, trainingSet);
    // Train until the error drops below 1% or 5000 epochs have run.
    int epoch = 1;
    do {
      train.iteration();
      System.out
          .println("Epoch #" + epoch + " Error:" + train.getError());
      epoch++;
    } while(train.getError() > 0.01 && epoch<5000);
   
   
    // Print the trained network's output for each training pair.
    EncogUtility.evaluate(network, trainingSet);
  }
View Full Code Here

TOP

Related Classes of org.encog.ml.train.MLTrain

Copyright © 2018 www.massapi.com. All rights reserved.
All source code are property of their respective owners. Java is a trademark of Sun Microsystems, Inc and owned by ORACLE Inc. Contact coftware#gmail.com.