Package org.apache.hama.ml.util

Examples of org.apache.hama.ml.util.DefaultFeatureTransformer
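DefaultFeatureTransformer is the FeatureTransformer implementation that Hama's ML components install when no custom transformer is configured. As a reference point for the snippets below, here is a minimal standalone sketch, not taken from this page: the import paths assume the Hama 0.6.x package layout, and the default transformer is assumed to return its input vector unchanged.

  // Minimal sketch (not from this page): exercise the transformer directly.
  // Assumptions: Hama 0.6.x package layout; DefaultFeatureTransformer performs
  // an identity transformation and returns its input unchanged.
  import org.apache.hama.ml.math.DenseDoubleVector;
  import org.apache.hama.ml.math.DoubleVector;
  import org.apache.hama.ml.util.DefaultFeatureTransformer;
  import org.apache.hama.ml.util.FeatureTransformer;

  public class DefaultFeatureTransformerSketch {
    public static void main(String[] args) {
      FeatureTransformer transformer = new DefaultFeatureTransformer();
      DoubleVector features = new DenseDoubleVector(new double[] { 0.2, 0.8, 0.5 });
      DoubleVector transformed = transformer.transform(features);
      System.out.println(transformed); // expected to equal the input features
    }
  }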


  // Trainer setup: grab the job configuration from the BSP peer, install the
  // default feature transformer, then delegate to the subclass hook.
  @Override
  public final void setup(
      BSPPeer<LongWritable, VectorWritable, NullWritable, NullWritable, MLPMessage> peer)
      throws IOException, SyncException, InterruptedException {
    conf = peer.getConfiguration();
    featureTransformer = new DefaultFeatureTransformer();
    this.extraSetup(peer);
  }
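Note that setup() is final: trainer subclasses hook their own initialization into extraSetup(peer), which runs after the configuration and the default transformer are in place. A hypothetical subclass hook, with the signature assumed from the call above:

  // Hypothetical subclass hook: by the time extraSetup runs, conf and
  // featureTransformer have already been initialized by setup().
  @Override
  protected void extraSetup(
      BSPPeer<LongWritable, VectorWritable, NullWritable, NullWritable, MLPMessage> peer) {
    // read trainer-specific settings from the job configuration
    int batchSize = conf.getInt("training.batch.size", 100);
  }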


    // Model initialization: resolve the cost and squashing functions by name
    // through the FunctionFactory, then fall back to the default transformer.
    this.costFunction = FunctionFactory
        .createDoubleDoubleFunction(this.costFunctionName);
    this.squashingFunction = FunctionFactory
        .createDoubleFunction(this.squashingFunctionName);

    this.featureTransformer = new DefaultFeatureTransformer();
  }

  protected FeatureTransformer featureTransformer;

  // NeuralNetwork constructor: every model starts out with the default
  // feature transformer until a custom one is set.
  public NeuralNetwork() {
    this.learningRate = DEFAULT_LEARNING_RATE;
    this.modelType = this.getClass().getSimpleName();
    this.featureTransformer = new DefaultFeatureTransformer();
  }

    // Round-trip test, write phase: build a small MLP, attach the default
    // transformer, and persist the model.
    String costFunctionName = "SquaredError";
    int[] layerSizeArray = new int[] { 3, 2, 2, 3 };
    MultiLayerPerceptron mlp = new SmallMultiLayerPerceptron(learningRate,
        regularization, momentum, squashingFunctionName, costFunctionName,
        layerSizeArray);
    FeatureTransformer transformer = new DefaultFeatureTransformer();
    mlp.setFeatureTransformer(transformer);
    try {
      mlp.writeModelToFile(modelPath);
    } catch (IOException e) {
      e.printStackTrace();
    }

    try {
      // read phase: reload the model and verify that the metadata, including
      // the feature transformer class, survived the round trip
      Configuration conf = new Configuration();
      FileSystem fs = FileSystem.get(conf);
      mlp = new SmallMultiLayerPerceptron(modelPath);
      assertEquals(mlp.getClass().getName(), mlp.getMLPType());
      assertEquals(learningRate, mlp.getLearningRate(), 0.001);
      assertEquals(regularization, mlp.isRegularization(), 0.001);
      assertEquals(layerSizeArray.length, mlp.getNumberOfLayers());
      assertEquals(momentum, mlp.getMomentum(), 0.001);
      assertEquals(squashingFunctionName, mlp.getSquashingFunctionName());
      assertEquals(costFunctionName, mlp.getCostFunctionName());
      assertArrayEquals(layerSizeArray, mlp.getLayerSizeArray());
      assertEquals(transformer.getClass().getName(), mlp.getFeatureTransformer().getClass().getName());
      // delete test file
      fs.delete(new Path(modelPath), true);
    } catch (IOException e) {
      e.printStackTrace();
    }

      for (DoubleMatrix mat : matrices) {
        MatrixWritable.write(mat, output);
      }

      // serialize the feature transformer: only its Class object is written
      // (length-prefixed), so the transformer is rebuilt reflectively on read
      FeatureTransformer transformer = new DefaultFeatureTransformer();
      Class<? extends FeatureTransformer> featureTransformerCls = transformer.getClass();
      byte[] featureTransformerBytes = SerializationUtils.serialize(featureTransformerCls);
      output.writeInt(featureTransformerBytes.length);
      output.write(featureTransformerBytes);
     
      output.close();
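
The read path is the mirror image: the length-prefixed bytes are deserialized back into a Class object, and a fresh transformer is created reflectively. A hedged sketch of that counterpart (variable names are illustrative; reflection exceptions are left to the surrounding method):

      // read side sketch: length-prefixed class bytes back to an instance;
      // assumes the transformer class has an accessible no-arg constructor
      int len = input.readInt();
      byte[] transformerBytes = new byte[len];
      input.readFully(transformerBytes);
      @SuppressWarnings("unchecked")
      Class<? extends FeatureTransformer> cls =
          (Class<? extends FeatureTransformer>) SerializationUtils.deserialize(transformerBytes);
      FeatureTransformer restored = cls.getDeclaredConstructor().newInstance();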

    // Round-trip test for SmallLayeredNeuralNetwork: set the weights, switch
    // to unsupervised learning, and attach the default feature transformer.
    matrices[0] = new DenseDoubleMatrix(5, 3, 0.2);
    matrices[1] = new DenseDoubleMatrix(1, 6, 0.8);
    ann.setWeightMatrices(matrices);
    ann.setLearningStyle(LearningStyle.UNSUPERVISED);

    FeatureTransformer defaultFeatureTransformer = new DefaultFeatureTransformer();
    ann.setFeatureTransformer(defaultFeatureTransformer);
   

    // write to file
    String modelPath = "/tmp/testSmallLayeredNeuralNetworkReadWrite";
    ann.setModelPath(modelPath);
    try {
      ann.writeModelToFile();
    } catch (IOException e) {
      e.printStackTrace();
    }

    // read from file
    SmallLayeredNeuralNetwork annCopy = new SmallLayeredNeuralNetwork(modelPath);
    assertEquals(annCopy.getClass().getSimpleName(), annCopy.getModelType());
    assertEquals(modelPath, annCopy.getModelPath());
    assertEquals(learningRate, annCopy.getLearningRate(), 0.000001);
    assertEquals(momentumWeight, annCopy.getMomemtumWeight(), 0.000001);
    assertEquals(regularizationWeight, annCopy.getRegularizationWeight(),
        0.000001);
    assertEquals(TrainingMethod.GRADIENT_DESCENT, annCopy.getTrainingMethod());
    assertEquals(LearningStyle.UNSUPERVISED, annCopy.getLearningStyle());

    // compare weights
    DoubleMatrix[] weightsMatrices = annCopy.getWeightMatrices();
    for (int i = 0; i < weightsMatrices.length; ++i) {
      DoubleMatrix expectMat = matrices[i];
      DoubleMatrix actualMat = weightsMatrices[i];
      for (int j = 0; j < expectMat.getRowCount(); ++j) {
        for (int k = 0; k < expectMat.getColumnCount(); ++k) {
          assertEquals(expectMat.get(j, k), actualMat.get(j, k), 0.000001);
        }
      }
    }
   
    FeatureTransformer copyTransformer = annCopy.getFeatureTransformer();
    assertEquals(defaultFeatureTransformer.getClass().getName(), copyTransformer.getClass().getName());
  }
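
Every snippet on this page installs the default transformer, but setFeatureTransformer accepts any FeatureTransformer subclass. A hypothetical custom transformer, with the vector API assumed from the snippets above; because only the transformer's Class is serialized with the model, a subclass needs a no-arg constructor:

  // Hypothetical custom transformer: min-max scale each feature into [0, 1].
  // The bounds are hard-coded only to keep the sketch short.
  public class MinMaxFeatureTransformer extends FeatureTransformer {
    @Override
    public DoubleVector transform(DoubleVector features) {
      double min = 0.0, max = 255.0; // assumed raw feature range
      DoubleVector scaled = new DenseDoubleVector(features.getDimension());
      for (int i = 0; i < features.getDimension(); ++i) {
        scaled.set(i, (features.get(i) - min) / (max - min));
      }
      return scaled;
    }
  }

It plugs in exactly like the default: ann.setFeatureTransformer(new MinMaxFeatureTransformer());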

    // Training example: finish wiring the network (output layer, cost
    // function, model path, default transformer) before kicking off training.
    ann.addLayer(1, true, FunctionFactory.createDoubleFunction("Sigmoid"));
    ann.setCostFunction(FunctionFactory
        .createDoubleDoubleFunction("CrossEntropy"));
    ann.setModelPath(modelPath);

    FeatureTransformer featureTransformer = new DefaultFeatureTransformer();

    ann.setFeatureTransformer(featureTransformer);

    long start = new Date().getTime();
    Map<String, String> trainingParameters = new HashMap<String, String>();
    trainingParameters.put("tasks", "5");
    trainingParameters.put("training.max.iterations", "2000");
    trainingParameters.put("training.batch.size", "300");
    trainingParameters.put("convergence.check.interval", "1000");
    ann.train(tmpDatasetPath, trainingParameters);
   

    long end = new Date().getTime();

    // validate: count misclassifications over the held-out test instances
    double errorRate = 0;
    for (double[] testInstance : testInstances) {
      DoubleVector instance = new DenseDoubleVector(testInstance);
      // the label is the last component: strip it, then apply the same
      // transformer used during training
      double expected = instance.get(instance.getDimension() - 1);
      instance = instance.slice(instance.getDimension() - 1);
      instance = featureTransformer.transform(instance);
      double actual = ann.getOutput(instance).get(0);
      if (actual < 0.5 && expected >= 0.5 || actual >= 0.5 && expected < 0.5) {
        ++errorRate;
      }
    }
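
The loop counts misclassifications; a natural continuation, not shown on this page, divides by the number of test instances to get an error rate (assuming testInstances is a List<double[]>):

    // hypothetical continuation: normalize the misclassification count
    errorRate /= testInstances.size();
    System.out.printf("trained in %d ms, error rate: %.4f%n", end - start, errorRate);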