Package org.apache.hama.commons.math

Examples of org.apache.hama.commons.math.DoubleVector
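The snippets below are collected from the Apache Hama sources and tests. As orientation, here is a minimal, self-contained sketch of the DoubleVector operations the examples rely on; the slice semantics noted in the comments are inferred from how the snippets below use them:

import org.apache.hama.commons.math.DenseDoubleVector;
import org.apache.hama.commons.math.DoubleVector;

public class DoubleVectorBasics {
  public static void main(String[] args) {
    DoubleVector v = new DenseDoubleVector(new double[] { 1.0, 2.0, 3.0 });

    double last = v.get(v.getDimension() - 1);             // element access: 3.0
    DoubleVector features = v.slice(v.getDimension() - 1); // first n elements: [1.0, 2.0]

    DoubleVector w = new DenseDoubleVector(new double[] { 1.0, 2.5, 3.0 });
    DoubleVector diff = v.subtract(w);                     // element-wise difference
    double squaredError = diff.dot(diff);                  // 0.25

    System.out.println(last + " " + squaredError + " " + features.getDimension());
  }
}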


      // (snippet begins mid-method: the last testSize instances are split off
      // as the test set; reconstructed from the surrounding code)
      testInstances.addAll(instanceList.subList(instanceList.size() - testSize,
          instanceList.size()));
      trainingInstances = instanceList.subList(0, instanceList.size()
          - testSize);

      for (double[] instance : trainingInstances) {
        DoubleVector vec = new DenseDoubleVector(instance);
        writer.append(new LongWritable(count++), new VectorWritable(vec));
      }
      writer.close();
    } catch (IOException | URISyntaxException e) {
      // FileNotFoundException is an IOException, so one multi-catch suffices
      e.printStackTrace();
    }

    // create model
    int dimension = 8;
    SmallLayeredNeuralNetwork ann = new SmallLayeredNeuralNetwork();
    ann.setLearningRate(0.7);
    ann.setMomemtumWeight(0.5); // (sic: method name spelled this way in the Hama API)
    ann.setRegularizationWeight(0.1);
    // addLayer(size, isFinalLayer, squashingFunction)
    ann.addLayer(dimension, false,
        FunctionFactory.createDoubleFunction("Sigmoid"));
    ann.addLayer(dimension, false,
        FunctionFactory.createDoubleFunction("Sigmoid"));
    ann.addLayer(dimension, false,
        FunctionFactory.createDoubleFunction("Sigmoid"));
    ann.addLayer(1, true, FunctionFactory.createDoubleFunction("Sigmoid"));
    ann.setCostFunction(FunctionFactory
        .createDoubleDoubleFunction("CrossEntropy"));
    ann.setModelPath(modelPath);
   
    FeatureTransformer featureTransformer = new DefaultFeatureTransformer();
   
    ann.setFeatureTransformer(featureTransformer);

    long start = System.currentTimeMillis();
    Map<String, String> trainingParameters = new HashMap<String, String>();
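    // Parameters are passed through to the distributed trainer; the semantics
    // below are inferred from the parameter names:
    //   tasks                      - number of BSP tasks to launch
    //   training.max.iterations    - upper bound on training iterations
    //   training.batch.size       - instances per mini-batch
    //   convergence.check.interval - iterations between convergence checks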
    trainingParameters.put("tasks", "5");
    trainingParameters.put("training.max.iterations", "2000");
    trainingParameters.put("training.batch.size", "300");
    trainingParameters.put("convergence.check.interval", "1000");
    ann.train(tmpDatasetPath, trainingParameters);
   

    long end = System.currentTimeMillis(); // training time = end - start

    // validate results
    double errorRate = 0;
    // count misclassified test instances (decision threshold at 0.5)
    for (double[] testInstance : testInstances) {
      DoubleVector instance = new DenseDoubleVector(testInstance);
      double expected = instance.get(instance.getDimension() - 1); // label is the last element
      instance = instance.slice(instance.getDimension() - 1); // keep only the features
      instance = featureTransformer.transform(instance);
      double actual = ann.getOutput(instance).get(0);
      if ((actual < 0.5 && expected >= 0.5) || (actual >= 0.5 && expected < 0.5)) {
        ++errorRate;
      }


        encoder.trainOnline(new DenseDoubleVector(instances[rnd.nextInt(instances.length)]));
      }
    }

    for (int i = 0; i < instances.length; ++i) {
      DoubleVector encodeVec = encoder.encode(new DenseDoubleVector(
          instances[i]));
      DoubleVector decodeVec = encoder.decode(encodeVec);
      for (int d = 0; d < instances[i].length; ++d) {
        assertEquals(instances[i][d], decodeVec.get(d), 0.1);
      }
    }

  }
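This test trains the autoencoder online on randomly sampled instances, then asserts that an encode/decode round trip reconstructs every instance to within 0.1 per dimension.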

      }
    }

    double errorInstance = 0;
    for (DoubleVector vector : vecInstanceList) {
      DoubleVector decoded = encoder.getOutput(vector);
      DoubleVector diff = vector.subtract(decoded);
      double error = diff.dot(diff);
      if (error > 0.1) {
        ++errorInstance;
      }
    }
    Log.info(String.format("Autoecoder error rate: %f%%\n", errorInstance * 100 / vecInstanceList.size()));

      // normalize instances
      zeroOneNormalization(instanceList, instanceList.get(0).length);
     
      // createWriter is the preferred factory over the deprecated constructor
      SequenceFile.Writer writer = SequenceFile.createWriter(fs, conf, path,
          LongWritable.class, VectorWritable.class);
      for (int i = 0; i < instanceList.size(); ++i) {
        DoubleVector vector = new DenseDoubleVector(instanceList.get(i));
        writer.append(new LongWritable(i), new VectorWritable(vector));
      }
     
      writer.close();
    } catch (IOException | URISyntaxException e) {
      e.printStackTrace();
    }
   
    // 3-dimensional input compressed into a 2-dimensional code layer
    AutoEncoder encoder = new AutoEncoder(3, 2);
    String modelPath = "/tmp/autoencoder-modelpath";
    encoder.setModelPath(modelPath);
    Map<String, String> trainingParams = new HashMap<String, String>();
    encoder.setLearningRate(0.5);
    trainingParams.put("tasks", "5");
    trainingParams.put("training.max.iterations", "3000");
    trainingParams.put("training.batch.size", "200");
    encoder.train(path, trainingParams);
   
    double errorInstance = 0;
    for (double[] instance : instanceList) {
      DoubleVector vector = new DenseDoubleVector(instance);
      DoubleVector decoded = encoder.getOutput(vector);
      DoubleVector diff = vector.subtract(decoded);
      double error = diff.dot(diff);
      if (error > 0.1) {
        ++errorInstance;
      }
    }
    Log.info(String.format("Autoecoder error rate: %f%%\n", errorInstance * 100 / instanceList.size()));

    Preconditions.checkArgument(this.layerSizeList.get(0) - 1 == instance
        .getDimension(), String.format(
        "The dimension of input instance should be %d.",
        this.layerSizeList.get(0) - 1));
    // transform the features to another space
    DoubleVector transformedInstance = this.featureTransformer
        .transform(instance);
    // add bias feature
    DoubleVector instanceWithBias = new DenseDoubleVector(
        transformedInstance.getDimension() + 1);
    instanceWithBias.set(0, 0.99999); // set bias slightly below 1.0
    for (int i = 1; i < instanceWithBias.getDimension(); ++i) {
      instanceWithBias.set(i, transformedInstance.get(i - 1));
    }

    List<DoubleVector> outputCache = getOutputInternal(instanceWithBias);
    // return the output of the last layer
    DoubleVector result = outputCache.get(outputCache.size() - 1);
    // remove bias
    return result.sliceUnsafe(1, result.getDimension() - 1);
  }
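Two bias conventions are worth noting in getOutput() above: a leading bias entry (set slightly below 1.0) is prepended to the transformed input, and the leading bias slot is sliced off the last layer's cached output before the result is returned.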

   * @return Cached output of each layer.
   */
  public List<DoubleVector> getOutputInternal(DoubleVector instanceWithBias) {
    List<DoubleVector> outputCache = new ArrayList<DoubleVector>();
    // fill with instance
    DoubleVector intermediateOutput = instanceWithBias;
    outputCache.add(intermediateOutput);

    for (int i = 0; i < this.layerSizeList.size() - 1; ++i) {
      intermediateOutput = forward(i, intermediateOutput);
      outputCache.add(intermediateOutput);

   * @return a new vector with the result of the operation.
   */
  protected DoubleVector forward(int fromLayer, DoubleVector intermediateOutput) {
    DoubleMatrix weightMatrix = this.weightMatrixList.get(fromLayer);

    DoubleVector vec = weightMatrix.multiplyVectorUnsafe(intermediateOutput);
    vec = vec.applyToElements(this.squashingFunctionList.get(fromLayer));

    // add bias
    DoubleVector vecWithBias = new DenseDoubleVector(vec.getDimension() + 1);
    vecWithBias.set(0, 1);
    for (int i = 0; i < vec.getDimension(); ++i) {
      vecWithBias.set(i + 1, vec.get(i));
    }
    return vecWithBias;
  }
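The forward step above is plain layer propagation: multiply the weight matrix by the bias-augmented input, apply the squashing function element-wise, then prepend a constant bias of 1 for the next layer. A minimal plain-array sketch of the same computation (a hypothetical standalone helper, not part of the Hama API; sigmoid stands in for the configured squashing function):

  // weights: one row per neuron in the next layer, one column per input entry
  static double[] forwardSketch(double[][] weights, double[] inputWithBias) {
    double[] out = new double[weights.length + 1];
    out[0] = 1.0; // bias unit for the next layer
    for (int row = 0; row < weights.length; ++row) {
      double sum = 0;
      for (int col = 0; col < inputWithBias.length; ++col) {
        sum += weights[row][col] * inputWithBias[col];
      }
      out[row + 1] = 1.0 / (1.0 + Math.exp(-sum)); // sigmoid squashing
    }
    return out;
  }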

    this.updateWeightMatrices(updateMatrices);
  }

  @Override
  public DoubleMatrix[] trainByInstance(DoubleVector trainingInstance) {
    DoubleVector transformedVector = this.featureTransformer
        .transform(trainingInstance.sliceUnsafe(this.layerSizeList.get(0) - 1));

    int inputDimension = this.layerSizeList.get(0) - 1;
    int outputDimension;
    DoubleVector inputInstance = null;
    DoubleVector labels = null;
    if (this.learningStyle == LearningStyle.SUPERVISED) {
      outputDimension = this.layerSizeList.get(this.layerSizeList.size() - 1);
      // validate training instance
      Preconditions.checkArgument(
          inputDimension + outputDimension == trainingInstance.getDimension(),
          String
              .format(
                  "The dimension of training instance is %d, but requires %d.",
                  trainingInstance.getDimension(), inputDimension
                      + outputDimension));

      inputInstance = new DenseDoubleVector(this.layerSizeList.get(0));
      inputInstance.set(0, 1); // add bias
      // get the features from the transformed vector
      for (int i = 0; i < inputDimension; ++i) {
        inputInstance.set(i + 1, transformedVector.get(i));
      }
      // get the labels from the original training instance
      labels = trainingInstance.sliceUnsafe(inputInstance.getDimension() - 1,
          trainingInstance.getDimension() - 1);
    } else if (this.learningStyle == LearningStyle.UNSUPERVISED) {
      // labels are identical to input features
      outputDimension = inputDimension;
      // validate training instance
      Preconditions.checkArgument(inputDimension == trainingInstance
          .getDimension(), String.format(
          "The dimension of training instance is %d, but requires %d.",
          trainingInstance.getDimension(), inputDimension));

      inputInstance = new DenseDoubleVector(this.layerSizeList.get(0));
      inputInstance.set(0, 1); // add bias
      // get the features from the transformed vector
      for (int i = 0; i < inputDimension; ++i) {
        inputInstance.set(i + 1, transformedVector.get(i));
      }
      // get the labels by copying the transformed vector
      labels = transformedVector.deepCopy();
    }

    List<DoubleVector> internalResults = this.getOutputInternal(inputInstance);
    DoubleVector output = internalResults.get(internalResults.size() - 1);

    // get the training error
    calculateTrainingError(labels,
        output.deepCopy().sliceUnsafe(1, output.getDimension() - 1));

    if (this.trainingMethod.equals(TrainingMethod.GRADIENT_DESCENT)) {
      return this.trainByInstanceGradientDescent(labels, internalResults);
    } else {
      throw new IllegalArgumentException(
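As the dimension checks above show, a supervised training instance is the concatenation of features and labels in a single vector, while an unsupervised instance carries the features only. For example, with three features and one label (values are illustrative):

  // 3 features followed by 1 label -> supervised instance of dimension 4
  DoubleVector supervised = new DenseDoubleVector(
      new double[] { 0.2, 0.7, 0.1, 1.0 });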

   * @return The weight update matrices.
   */
  private DoubleMatrix[] trainByInstanceGradientDescent(DoubleVector labels,
      List<DoubleVector> internalResults) {

    DoubleVector output = internalResults.get(internalResults.size() - 1);
    // initialize weight update matrices
    DenseDoubleMatrix[] weightUpdateMatrices = new DenseDoubleMatrix[this.weightMatrixList
        .size()];
    for (int m = 0; m < weightUpdateMatrices.length; ++m) {
      weightUpdateMatrices[m] = new DenseDoubleMatrix(this.weightMatrixList
          .get(m).getRowCount(), this.weightMatrixList.get(m).getColumnCount());
    }
    DoubleVector deltaVec = new DenseDoubleVector(
        this.layerSizeList.get(this.layerSizeList.size() - 1));

    DoubleFunction squashingFunction = this.squashingFunctionList
        .get(this.squashingFunctionList.size() - 1);

    DoubleMatrix lastWeightMatrix = this.weightMatrixList
        .get(this.weightMatrixList.size() - 1);
    for (int i = 0; i < deltaVec.getDimension(); ++i) {
      double costFuncDerivative = this.costFunction.applyDerivative(
          labels.get(i), output.get(i + 1));
      // add regularization
      costFuncDerivative += this.regularizationWeight
          * lastWeightMatrix.getRowVector(i).sum();
      deltaVec.set(i, costFuncDerivative
          * squashingFunction.applyDerivative(output.get(i + 1)));
    }

    // start from previous layer of output layer
    for (int layer = this.layerSizeList.size() - 2; layer >= 0; --layer) {
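In the loop above, the delta of output neuron i combines the cost derivative, a regularization term over the neuron's incoming weights, and the derivative of the squashing function (applyDerivative is evaluated at the neuron's output value):

  \delta_i = \left( \frac{\partial C}{\partial o_i} + \lambda \sum_j W_{ij} \right) f'(o_i)

where \lambda is the regularizationWeight and W is the last weight matrix.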

      DenseDoubleMatrix weightUpdateMatrix) {

    // get layer related information
    DoubleFunction squashingFunction = this.squashingFunctionList
        .get(curLayerIdx);
    DoubleVector curLayerOutput = outputCache.get(curLayerIdx);
    DoubleMatrix weightMatrix = this.weightMatrixList.get(curLayerIdx);
    DoubleMatrix prevWeightMatrix = this.prevWeightUpdatesList.get(curLayerIdx);

    // next layer is not output layer, remove the delta of bias neuron
    if (curLayerIdx != this.layerSizeList.size() - 2) {
      nextLayerDelta = nextLayerDelta.slice(1,
          nextLayerDelta.getDimension() - 1);
    }

    DoubleVector delta = weightMatrix.transpose()
        .multiplyVector(nextLayerDelta);
    for (int i = 0; i < delta.getDimension(); ++i) {
      delta.set(
          i,
          delta.get(i)
              * squashingFunction.applyDerivative(curLayerOutput.get(i)));
    }

    // update weights
    for (int i = 0; i < weightUpdateMatrix.getRowCount(); ++i) {
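The hidden-layer delta computed here follows the standard backpropagation recurrence: when the next layer is itself hidden, its bias delta is dropped first, and then

  \delta^{(l)} = \left( W^{(l)\top} \delta^{(l+1)} \right) \odot f'(o^{(l)})

after which the truncated remainder of the method accumulates the weight updates from these deltas and the cached layer outputs.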
