Examples of DenseDoubleVector


Examples of org.apache.hama.commons.math.DenseDoubleVector


  public static DoubleVector readVector(DataInput in) throws IOException {
    int length = in.readInt();
    DoubleVector vector;
    vector = new DenseDoubleVector(length);
    for (int i = 0; i < length; i++) {
      vector.set(i, in.readDouble());
    }

    if (in.readBoolean()) {
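
The read logic above implies a simple serialization layout: an int length, then each component as a double, then a boolean flag (its handling is truncated in the snippet). Below is a minimal sketch of the matching writer under that assumption; the writeVector name and the flag value are illustrative, not taken from the original source:

  // Illustrative counterpart to readVector, assuming the layout implied above:
  // vector length, each component, then a boolean flag (purpose truncated above).
  public static void writeVector(DoubleVector vector, DataOutput out)
      throws IOException {
    out.writeInt(vector.getDimension());
    for (int i = 0; i < vector.getDimension(); i++) {
      out.writeDouble(vector.get(i));
    }
    out.writeBoolean(false); // flag whose meaning is cut off in the read snippet
  }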

Examples of org.apache.hama.commons.math.DenseDoubleVector

    matrices[0] = new DenseDoubleMatrix(5, 3, 0.5);
    matrices[1] = new DenseDoubleMatrix(1, 6, 0.5);
    ann.setWeightMatrices(matrices);

    double[] arr = new double[] { 0, 1 };
    DoubleVector training = new DenseDoubleVector(arr);
    DoubleVector result = ann.getOutput(training);
    assertEquals(1, result.getDimension());
    // assertEquals(3, result.get(0), 0.000001);

    // second network
    SmallLayeredNeuralNetwork ann2 = new SmallLayeredNeuralNetwork();
    ann2.addLayer(2, false, FunctionFactory.createDoubleFunction("Sigmoid"));
    ann2.addLayer(3, false, FunctionFactory.createDoubleFunction("Sigmoid"));
    ann2.addLayer(1, true, FunctionFactory.createDoubleFunction("Sigmoid"));
    ann2.setCostFunction(FunctionFactory
        .createDoubleDoubleFunction("SquaredError"));
    ann2.setLearningRate(0.3);
    // intentionally initialize all weights to 0.5
    DoubleMatrix[] matrices2 = new DenseDoubleMatrix[2];
    matrices2[0] = new DenseDoubleMatrix(3, 3, 0.5);
    matrices2[1] = new DenseDoubleMatrix(1, 4, 0.5);
    ann2.setWeightMatrices(matrices2);

    double[] test = { 0, 0 };
    double[] result2 = { 0.807476 };

    DoubleVector vec = ann2.getOutput(new DenseDoubleVector(test));
    assertArrayEquals(result2, vec.toArray(), 0.000001);

    SmallLayeredNeuralNetwork ann3 = new SmallLayeredNeuralNetwork();
    ann3.addLayer(2, false, FunctionFactory.createDoubleFunction("Sigmoid"));
    ann3.addLayer(3, false, FunctionFactory.createDoubleFunction("Sigmoid"));
    ann3.addLayer(1, true, FunctionFactory.createDoubleFunction("Sigmoid"));
    ann3.setCostFunction(FunctionFactory
        .createDoubleDoubleFunction("SquaredError"));
    ann3.setLearningRate(0.3);
    // intentionally initialize all weights to 0.5
    DoubleMatrix[] initMatrices = new DenseDoubleMatrix[2];
    initMatrices[0] = new DenseDoubleMatrix(3, 3, 0.5);
    initMatrices[1] = new DenseDoubleMatrix(1, 4, 0.5);
    ann3.setWeightMatrices(initMatrices);

    double[] instance = { 0, 1 };
    DoubleVector output = ann3.getOutput(new DenseDoubleVector(instance));
    assertEquals(0.8315410, output.get(0), 0.000001);
  }
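
In the snippet above, the 2-3-1 network ann2 is given 3x3 and 1x4 weight matrices. The shapes appear to follow a convention of rows = units in the next layer and columns = units in the previous layer plus one bias column; here is a minimal sketch under that assumption:

    // Assumed shape convention, inferred from the 3x3 and 1x4 matrices above:
    // rows = next-layer units, columns = previous-layer units + 1 (bias).
    DoubleMatrix[] weights = new DenseDoubleMatrix[2];
    weights[0] = new DenseDoubleMatrix(3, 2 + 1, 0.5); // hidden layer: 3 x (2 inputs + bias)
    weights[1] = new DenseDoubleMatrix(1, 3 + 1, 0.5); // output layer: 1 x (3 hidden + bias)
    ann2.setWeightMatrices(weights);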

Examples of org.apache.hama.commons.math.DenseDoubleVector

    int iterations = 50000; // iterations should be set to a very large number
    double[][] instances = { { 0, 1, 1 }, { 0, 0, 0 }, { 1, 0, 1 }, { 1, 1, 0 } };
    for (int i = 0; i < iterations; ++i) {
      DoubleMatrix[] matrices = null;
      for (int j = 0; j < instances.length; ++j) {
        matrices = ann.trainByInstance(new DenseDoubleVector(instances[j
            % instances.length]));
        ann.updateWeightMatrices(matrices);
      }
    }

    for (int i = 0; i < instances.length; ++i) {
      DoubleVector input = new DenseDoubleVector(instances[i]).slice(2);
      // the expected output is the last element in array
      double result = instances[i][2];
      double actual = ann.getOutput(input).get(0);
      if (result < 0.5 && actual >= 0.5 || result >= 0.5 && actual < 0.5) {
        Log.info("Neural network failes to lear the XOR.");
      }
    }

    // write model into file and read out
    String modelPath = "/tmp/testSmallLayeredNeuralNetworkXORLocal";
    ann.setModelPath(modelPath);
    try {
      ann.writeModelToFile();
    } catch (IOException e) {
      e.printStackTrace();
    }
    SmallLayeredNeuralNetwork annCopy = new SmallLayeredNeuralNetwork(modelPath);
    // test on instances
    for (int i = 0; i < instances.length; ++i) {
      DoubleVector input = new DenseDoubleVector(instances[i]).slice(2);
      // the expected output is the last element in array
      double result = instances[i][2];
      double actual = annCopy.getOutput(input).get(0);
      if (result < 0.5 && actual >= 0.5 || result >= 0.5 && actual < 0.5) {
        Log.info("Neural network failes to lear the XOR.");
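
The checks above (for both ann and the reloaded annCopy) only log a message when the network misclassifies an XOR instance. An illustrative, stricter variant that fails the test outright, assuming JUnit's assertTrue is available alongside the assertEquals already used in these tests:

    // Illustrative variant: fail the test instead of only logging a misclassification.
    for (int i = 0; i < instances.length; ++i) {
      DoubleVector input = new DenseDoubleVector(instances[i]).slice(2);
      double expected = instances[i][2];
      double actual = ann.getOutput(input).get(0);
      assertTrue("XOR misclassified for instance " + i,
          (expected >= 0.5) == (actual >= 0.5));
    }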

Examples of org.apache.hama.commons.math.DenseDoubleVector

    int iterations = 2000; // iterations should be set to a very large number
    double[][] instances = { { 0, 1, 1 }, { 0, 0, 0 }, { 1, 0, 1 }, { 1, 1, 0 } };
    for (int i = 0; i < iterations; ++i) {
      for (int j = 0; j < instances.length; ++j) {
        ann.trainOnline(new DenseDoubleVector(instances[j % instances.length]));
      }
    }

    for (int i = 0; i < instances.length; ++i) {
      DoubleVector input = new DenseDoubleVector(instances[i]).slice(2);
      // the expected output is the last element in array
      double result = instances[i][2];
      double actual = ann.getOutput(input).get(0);
      if (result < 0.5 && actual >= 0.5 || result >= 0.5 && actual < 0.5) {
        Log.info("Neural network failes to lear the XOR.");
      }
    }

    // write model into file and read out
    String modelPath = "/tmp/testSmallLayeredNeuralNetworkXORLocalWithMomentum";
    ann.setModelPath(modelPath);
    try {
      ann.writeModelToFile();
    } catch (IOException e) {
      e.printStackTrace();
    }
    SmallLayeredNeuralNetwork annCopy = new SmallLayeredNeuralNetwork(modelPath);
    // test on instances
    for (int i = 0; i < instances.length; ++i) {
      DoubleVector input = new DenseDoubleVector(instances[i]).slice(2);
      // the expected output is the last element in array
      double result = instances[i][2];
      double actual = annCopy.getOutput(input).get(0);
      if (result < 0.5 && actual >= 0.5 || result >= 0.5 && actual < 0.5) {
        Log.info("Neural network failes to lear the XOR.");

Examples of org.apache.hama.commons.math.DenseDoubleVector

    int iterations = 5000; // iterations should be set to a very large number
    double[][] instances = { { 0, 1, 1 }, { 0, 0, 0 }, { 1, 0, 1 }, { 1, 1, 0 } };
    for (int i = 0; i < iterations; ++i) {
      for (int j = 0; j < instances.length; ++j) {
        ann.trainOnline(new DenseDoubleVector(instances[j % instances.length]));
      }
    }

    for (int i = 0; i < instances.length; ++i) {
      DoubleVector input = new DenseDoubleVector(instances[i]).slice(2);
      // the expected output is the last element in array
      double result = instances[i][2];
      double actual = ann.getOutput(input).get(0);
      if (result < 0.5 && actual >= 0.5 || result >= 0.5 && actual < 0.5) {
        Log.info("Neural network failes to lear the XOR.");
      }
    }

    // write model into file and read out
    String modelPath = "/tmp/testSmallLayeredNeuralNetworkXORLocalWithRegularization";
    ann.setModelPath(modelPath);
    try {
      ann.writeModelToFile();
    } catch (IOException e) {
      e.printStackTrace();
    }
    SmallLayeredNeuralNetwork annCopy = new SmallLayeredNeuralNetwork(modelPath);
    // test on instances
    for (int i = 0; i < instances.length; ++i) {
      DoubleVector input = new DenseDoubleVector(instances[i]).slice(2);
      // the expected output is the last element in array
      double result = instances[i][2];
      double actual = annCopy.getOutput(input).get(0);
      if (result < 0.5 && actual >= 0.5 || result >= 0.5 && actual < 0.5) {
        Log.info("Neural network failes to lear the XOR.");

Examples of org.apache.hama.commons.math.DenseDoubleVector

    long start = new Date().getTime();
    int iterations = 1000;
    for (int i = 0; i < iterations; ++i) {
      for (double[] trainingInstance : trainingInstances) {
        ann.trainOnline(new DenseDoubleVector(trainingInstance));
      }
    }
    long end = new Date().getTime();
    Log.info(String.format("Training time: %fs\n",
        (double) (end - start) / 1000));

    double errorRate = 0;
    // calculate the error on test instance
    for (double[] testInstance : testInstances) {
      DoubleVector instance = new DenseDoubleVector(testInstance);
      double expected = instance.get(instance.getDimension() - 1);
      instance = instance.slice(instance.getDimension() - 1);
      double actual = ann.getOutput(instance).get(0);
      if (actual < 0.5 && expected >= 0.5 || actual >= 0.5 && expected < 0.5) {
        ++errorRate;
      }
    }
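
The snippet above counts misclassified test instances in errorRate but is cut off before reporting it. A plausible continuation, mirroring the logging style used earlier and assuming testInstances is a List, would normalize the count by the test-set size:

    // Illustrative continuation: report the misclassification rate over the test set.
    Log.info(String.format("Relative error: %f%%\n",
        errorRate / testInstances.size() * 100));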

Examples of org.apache.hama.commons.math.DenseDoubleVector

          instanceList.size()));
      trainingInstances = instanceList.subList(0, instanceList.size()
          - testSize);

      for (double[] instance : trainingInstances) {
        DoubleVector vec = new DenseDoubleVector(instance);
        writer.append(new LongWritable(count++), new VectorWritable(vec));
      }
      writer.close();
    } catch (FileNotFoundException e) {
      e.printStackTrace();
    } catch (IOException e) {
      e.printStackTrace();
    } catch (URISyntaxException e) {
      e.printStackTrace();
    }

    // create model
    int dimension = 8;
    SmallLayeredNeuralNetwork ann = new SmallLayeredNeuralNetwork();
    ann.setLearningRate(0.7);
    ann.setMomemtumWeight(0.5);
    ann.setRegularizationWeight(0.1);
    ann.addLayer(dimension, false,
        FunctionFactory.createDoubleFunction("Sigmoid"));
    ann.addLayer(dimension, false,
        FunctionFactory.createDoubleFunction("Sigmoid"));
    ann.addLayer(dimension, false,
        FunctionFactory.createDoubleFunction("Sigmoid"));
    ann.addLayer(1, true, FunctionFactory.createDoubleFunction("Sigmoid"));
    ann.setCostFunction(FunctionFactory
        .createDoubleDoubleFunction("CrossEntropy"));
    ann.setModelPath(modelPath);

    long start = new Date().getTime();
    Map<String, String> trainingParameters = new HashMap<String, String>();
    trainingParameters.put("tasks", "5");
    trainingParameters.put("training.max.iterations", "2000");
    trainingParameters.put("training.batch.size", "300");
    trainingParameters.put("convergence.check.interval", "1000");
    ann.train(tmpDatasetPath, trainingParameters);

    long end = new Date().getTime();

    // validate results
    double errorRate = 0;
    // calculate the error on test instance
    for (double[] testInstance : testInstances) {
      DoubleVector instance = new DenseDoubleVector(testInstance);
      double expected = instance.get(instance.getDimension() - 1);
      instance = instance.slice(instance.getDimension() - 1);
      double actual = ann.getOutput(instance).get(0);
      if (actual < 0.5 && expected >= 0.5 || actual >= 0.5 && expected < 0.5) {
        ++errorRate;
      }
    }

Examples of org.apache.hama.commons.math.DenseDoubleVector

          instanceList.size()));
      trainingInstances = instanceList.subList(0, instanceList.size()
          - testSize);

      for (double[] instance : trainingInstances) {
        DoubleVector vec = new DenseDoubleVector(instance);
        writer.append(new LongWritable(count++), new VectorWritable(vec));
      }
      writer.close();
    } catch (FileNotFoundException e) {
      e.printStackTrace();
    } catch (IOException e) {
      e.printStackTrace();
    } catch (URISyntaxException e) {
      e.printStackTrace();
    }

    // create model
    int dimension = 8;
    SmallLayeredNeuralNetwork ann = new SmallLayeredNeuralNetwork();
    ann.setLearningRate(0.7);
    ann.setMomemtumWeight(0.5);
    ann.setRegularizationWeight(0.1);
    ann.addLayer(dimension, false,
        FunctionFactory.createDoubleFunction("Sigmoid"));
    ann.addLayer(dimension, false,
        FunctionFactory.createDoubleFunction("Sigmoid"));
    ann.addLayer(dimension, false,
        FunctionFactory.createDoubleFunction("Sigmoid"));
    ann.addLayer(1, true, FunctionFactory.createDoubleFunction("Sigmoid"));
    ann.setCostFunction(FunctionFactory
        .createDoubleDoubleFunction("CrossEntropy"));
    ann.setModelPath(modelPath);
   
    FeatureTransformer featureTransformer = new DefaultFeatureTransformer();
   
    ann.setFeatureTransformer(featureTransformer);

    long start = new Date().getTime();
    Map<String, String> trainingParameters = new HashMap<String, String>();
    trainingParameters.put("tasks", "5");
    trainingParameters.put("training.max.iterations", "2000");
    trainingParameters.put("training.batch.size", "300");
    trainingParameters.put("convergence.check.interval", "1000");
    ann.train(tmpDatasetPath, trainingParameters);
   

    long end = new Date().getTime();

    // validate results
    double errorRate = 0;
    // calculate the error on test instance
    for (double[] testInstance : testInstances) {
      DoubleVector instance = new DenseDoubleVector(testInstance);
      double expected = instance.get(instance.getDimension() - 1);
      instance = instance.slice(instance.getDimension() - 1);
      instance = featureTransformer.transform(instance);
      double actual = ann.getOutput(instance).get(0);
      if (actual < 0.5 && expected >= 0.5 || actual >= 0.5 && expected < 0.5) {
        ++errorRate;
      }
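
Note that in the snippet above the same FeatureTransformer set on the network before training is also applied to every test instance before getOutput; skipping that step would evaluate the model on differently scaled features. A small hypothetical helper (not part of the original test) that keeps those two steps together:

  // Hypothetical helper mirroring the evaluation step above: always apply the
  // same FeatureTransformer used during training before querying the model.
  static double predict(SmallLayeredNeuralNetwork ann,
      FeatureTransformer transformer, double[] rawFeatures) {
    DoubleVector features = transformer.transform(new DenseDoubleVector(rawFeatures));
    return ann.getOutput(features).get(0);
  }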

Examples of org.apache.hama.commons.math.DenseDoubleVector

   
    int maxIteration = 2000;
    Random rnd = new Random();
    for (int iteration = 0; iteration < maxIteration; ++iteration) {
      for (int i = 0; i < instances.length; ++i) {
        encoder.trainOnline(new DenseDoubleVector(instances[rnd.nextInt(instances.length)]));
      }
    }

    for (int i = 0; i < instances.length; ++i) {
      DoubleVector encodeVec = encoder.encode(new DenseDoubleVector(
          instances[i]));
      DoubleVector decodeVec = encoder.decode(encodeVec);
      for (int d = 0; d < instances[i].length; ++d) {
        assertEquals(instances[i][d], decodeVec.get(d), 0.1);
      }

Examples of org.apache.hama.commons.math.DenseDoubleVector

      e.printStackTrace();
    }
   
    List<DoubleVector> vecInstanceList = new ArrayList<DoubleVector>();
    for (double[] instance : instanceList) {
      vecInstanceList.add(new DenseDoubleVector(instance));
    }
    AutoEncoder encoder = new AutoEncoder(3, 2);
    encoder.setLearningRate(0.05);
    encoder.setMomemtumWeight(0.1);
    int maxIteration = 2000;
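
This last snippet is cut off right after the AutoEncoder is configured. A plausible continuation, following the pattern of the earlier AutoEncoder example (online training on randomly drawn instances, then an encode/decode round trip) and assuming instanceList is a List of double arrays as the loop above suggests:

    // Plausible continuation, modeled on the earlier AutoEncoder snippet:
    // train online on randomly drawn instances, then round-trip one vector.
    Random rnd = new Random();
    for (int iteration = 0; iteration < maxIteration; ++iteration) {
      for (int i = 0; i < instanceList.size(); ++i) {
        encoder.trainOnline(new DenseDoubleVector(
            instanceList.get(rnd.nextInt(instanceList.size()))));
      }
    }
    DoubleVector encoded = encoder.encode(new DenseDoubleVector(instanceList.get(0)));
    DoubleVector decoded = encoder.decode(encoded);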