Package de.jungblut.math

Examples of de.jungblut.math.DoubleMatrix

The snippets below show how DoubleMatrix and its dense and sparse
implementations (DenseDoubleMatrix, SparseDoubleRowMatrix) are used across
the library, from logistic regression training to neural network
backpropagation.

  @Override
  public void train(DoubleVector[] features, DoubleVector[] outcome) {
    Preconditions.checkArgument(features.length == outcome.length,
        "Features and Outcomes need to match in length!");
    DoubleMatrix x = null;
    DoubleMatrix y = null;
    // add the bias
    if (features[0].isSparse()) {
      x = new SparseDoubleRowMatrix(DenseDoubleVector.ones(features.length),
          new SparseDoubleRowMatrix(features));
    } else {
      x = new DenseDoubleMatrix(DenseDoubleVector.ones(features.length),
          new DenseDoubleMatrix(features));
    }
    if (outcome[0].isSparse()) {
      y = new SparseDoubleRowMatrix(outcome);
    } else {
      y = new DenseDoubleMatrix(outcome);
    }
    // transpose y to get a faster lookup in the cost function
    y = y.transpose();

    LogisticRegressionCostFunction cnf = new LogisticRegressionCostFunction(x,
        y, lambda);

    // random init theta
    theta = new DenseDoubleVector(x.getColumnCount() * y.getRowCount());
    for (int i = 0; i < theta.getDimension(); i++) {
      theta.set(i, (random.nextDouble() * 2) - 1d);
    }
    theta = minimizer.minimize(cnf, theta, numIterations, verbose);
  }
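A minimal sketch of how train() might be invoked; the surrounding classifier
object and its configuration (minimizer, iteration count) are assumptions of
this example:

  // hedged sketch: "classifier" stands for an instance of the class that
  // declares train(); its construction is not shown in the excerpt
  DoubleVector[] features = new DoubleVector[] {
      new DenseDoubleVector(new double[] { 1d, 2d }),
      new DenseDoubleVector(new double[] { 3d, 4d }) };
  DoubleVector[] outcome = new DoubleVector[] {
      new DenseDoubleVector(new double[] { 0d }),
      new DenseDoubleVector(new double[] { 1d }) };
  classifier.train(features, outcome);
  // train() prepends a bias column, initializes theta uniformly in [-1, 1)
  // and hands the optimization to the configured minimizer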


  /**
   * Unfolds the given vector into a matrix, filling it row by row.
   *
   * @param vector the vector to unfold.
   * @param rows the number of rows the target matrix needs to have.
   * @param cols the number of columns the target matrix needs to have.
   * @return a matrix with the contents of the vector, row split.
   */
  public static DoubleMatrix unfoldMatrix(DoubleVector vector, int rows,
      int cols) {
    DoubleMatrix mat = new DenseDoubleMatrix(rows, cols);

    int index = 0;
    for (int i = 0; i < rows; i++) {
      for (int j = 0; j < cols; j++) {
        mat.set(i, j, vector.get(index++));
      }
    }

    return mat;
  }
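For example, a six-element vector unfolds row by row into a 2 x 3 matrix:

  DoubleVector v = new DenseDoubleVector(
      new double[] { 1d, 2d, 3d, 4d, 5d, 6d });
  DoubleMatrix m = unfoldMatrix(v, 2, 3);
  // row 0 of m is [1, 2, 3], row 1 is [4, 5, 6]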

      // excerpt from a mini-batch split: r iterates over [start, end] row
      // ranges of the input arrays
      int start = r.getStart();
      int end = r.getEnd(); // inclusive
      DoubleVector[] featureSubArray = ArrayUtils.subArray(inputMatrix, start,
          end);
      boolean sparse = featureSubArray[0].isSparse();
      DoubleMatrix outcomeMat = null;
      if (outcomeMatrix != null) {
        DoubleVector[] outcomeSubArray = ArrayUtils.subArray(outcomeMatrix,
            start, end);
        outcomeMat = new DenseDoubleMatrix(outcomeSubArray);
      }
      DenseDoubleVector bias = DenseDoubleVector.ones(featureSubArray.length);
      DoubleMatrix featureMatrix = sparse ? new SparseDoubleRowMatrix(
          featureSubArray) : new DenseDoubleMatrix(featureSubArray);
      DoubleMatrix featuresWithBias = sparse ? new SparseDoubleRowMatrix(bias,
          featureMatrix) : new DenseDoubleMatrix(bias, featureMatrix);
      batches.add(new Tuple<>(featuresWithBias, outcomeMat));
    }
  }
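The bias handling follows the same convention as in the train() method of the
first snippet: the (vector, matrix) constructors prepend the given vector as
the first column. A small sketch of the resulting shape:

  // three examples with two features each
  DoubleMatrix features = new DenseDoubleMatrix(new double[][] {
      { 1d, 2d }, { 3d, 4d }, { 5d, 6d } });
  // prepending a ones column yields a 3x3 matrix whose first column
  // is the bias of 1s
  DoubleMatrix withBias = new DenseDoubleMatrix(
      DenseDoubleVector.ones(3), features);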

    // tail of the constructor: the weight matrix is numFeatures x numOutcomes
    this.alpha = alpha;
    this.weights = new SparseDoubleRowMatrix(numFeatures, numOutcomes);
  }

  public void train(Iterable<Tuple<DoubleVector, DoubleVector>> dataStream) {
    DoubleMatrix theta = this.weights;
    initWeights(dataStream, theta);
    for (int epoch = 0; epoch < epochs; epoch++) {
      double lossSum = 0d;
      int localItems = 0;
      for (Tuple<DoubleVector, DoubleVector> tuple : dataStream) {
        localItems++;
        DoubleVector feature = tuple.getFirst();
        DoubleVector outcome = tuple.getSecond();
        DoubleVector z1 = theta.multiplyVectorColumn(feature);
        DoubleVector activations = SIGMOID.apply(z1);
        double loss = LOSS.calculateError(
            new SparseDoubleRowMatrix(Arrays.asList(outcome)),
            new SparseDoubleRowMatrix(Arrays.asList(activations)));
        lossSum += loss;
        DoubleVector activationDifference = activations.subtract(outcome);
        // update theta by a smarter sparsity algorithm
        Iterator<DoubleVectorElement> featureIterator = feature
            .iterateNonZero();
        while (featureIterator.hasNext()) {
          DoubleVectorElement next = featureIterator.next();
          DoubleVector rowVector = theta.getRowVector(next.getIndex());
          double l2 = rowVector.pow(2d).sum();
          Iterator<DoubleVectorElement> diffIterator = activationDifference
              .iterateNonZero();
          while (diffIterator.hasNext()) {
            DoubleVectorElement diffElement = diffIterator.next();
            // ...
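The inner loop body is cut off above. A plausible sketch of the sparse
gradient step it performs, touching only the (feature, outcome) cells with
non-zero gradient; the full source may differ, for example in how it uses the
precomputed l2 norm for regularization:

            // hedged sketch of the elided per-cell update
            double grad = diffElement.getValue() * next.getValue();
            double old = theta.get(next.getIndex(), diffElement.getIndex());
            theta.set(next.getIndex(), diffElement.getIndex(),
                old - alpha * grad);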

  public CostGradientTuple evaluateCost(DoubleVector theta) {

    DoubleVector activation = SIGMOID.get().apply(x.multiplyVectorRow(theta));
    DenseDoubleMatrix hypo = new DenseDoubleMatrix(Arrays.asList(activation));
    double error = ERROR_FUNCTION.calculateError(y, hypo);
    DoubleMatrix loss = hypo.subtract(y);
    double j = error / m;
    DoubleVector gradient = xTransposed.multiplyVectorRow(loss.getRowVector(0))
        .divide(m);
    if (lambda != 0d) {
      DoubleVector reg = theta.multiply(lambda / m);
      // don't regularize the bias
      reg.set(0, 0d);
      // ...
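In formulas, with the hypothesis h_\theta(X) = \sigma(X\theta), the snippet
computes

    J(\theta) = \frac{1}{m}\,\mathrm{error}\bigl(y, h_\theta(X)\bigr), \qquad
    \nabla J(\theta) = \frac{1}{m}\,X^{\top}\bigl(h_\theta(X) - y\bigr)
        + \frac{\lambda}{m}\,\theta

with the bias component of the regularization term zeroed out, which is
exactly what reg.set(0, 0d) does.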

      // excerpt from stacked, layer-wise training: a cost function fnc for
      // layer i is constructed with these parameters
          miniBatchSize, batchParallelism, layerSizes[i], activationFunction,
          type, lambda, seed, stochastic);
      DoubleVector theta = minimizer.minimize(fnc, folded, numIterations,
          verbose);
      // get back our weights as a matrix
      DoubleMatrix thetaMat = DenseMatrixFolder.unfoldMatrices(theta,
          fnc.getUnfoldParameters())[0];
      weights[i] = thetaMat;
      // now we can get our new training set for the next stack
      if (i + 1 != layerSizes.length) {
        for (int row = 0; row < currentTrainingSet.length; row++) {
        // ...
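Folding lets the minimizer treat all layer weights as one flat vector, which
is unfolded back into matrices after optimization. A round-trip sketch,
assuming DenseMatrixFolder also offers a foldMatrices counterpart and that
the unfold parameters are {rows, cols} pairs:

  DenseDoubleMatrix w = new DenseDoubleMatrix(2, 3);
  DoubleVector flat = DenseMatrixFolder.foldMatrices(w);
  DoubleMatrix back = DenseMatrixFolder.unfoldMatrices(flat,
      new int[][] { { 2, 3 } })[0];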

    DoubleMatrix[] deltaX = new DoubleMatrix[conf.layerSizes.length];
    // set the last delta to the difference of outcome and prediction
    deltaX[deltaX.length - 1] = ax[conf.layerSizes.length - 1].subtract(y);
    // compute the deltas onto the input layer
    for (int i = (conf.layerSizes.length - 2); i > 0; i--) {
      DoubleMatrix slice = thetas[i].slice(0, thetas[i].getRowCount(), 1,
          thetas[i].getColumnCount());
      deltaX[i] = multiply(deltaX[i + 1], slice, false, false, conf);
      // apply the gradient of the activations
      deltaX[i] = deltaX[i].multiplyElementWise(conf.activations[i]
          .gradient(zx[i]));
    }
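This is the standard backpropagation recurrence; the slice drops the bias
column of each weight matrix before the error is propagated:

    \delta^{(L)} = a^{(L)} - y, \qquad
    \delta^{(l)} = \bigl(\delta^{(l+1)}\,\Theta^{(l)}_{:,\,1:}\bigr)
        \odot g'\bigl(z^{(l)}\bigr), \quad l = L-1, \dots, 1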

  public static void calculateGradients(DoubleMatrix[] thetas,
      DoubleMatrix[] thetaGradients, DoubleMatrix[] ax, DoubleMatrix[] deltaX,
      final int m, NetworkConfiguration conf) {
    // calculate the gradients of the weights
    for (int i = 0; i < thetaGradients.length; i++) {
      DoubleMatrix gradDXA = multiply(deltaX[i + 1], ax[i], true, false, conf);
      if (m != 1) {
        thetaGradients[i] = gradDXA.divide(m);
      } else {
        thetaGradients[i] = gradDXA;
      }
      if (conf.lambda != 0d) {
        thetaGradients[i] = thetaGradients[i].add((thetas[i]
          // ...
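Written out, the weight gradients computed here are

    \nabla\Theta^{(l)} = \frac{1}{m}\,\bigl(\delta^{(l+1)}\bigr)^{\top} a^{(l)}
        + \frac{\lambda}{m}\,\Theta^{(l)}

where the regularization term is presumably what the truncated tail adds (it
is guarded by conf.lambda != 0d), with the bias weights conventionally
excluded, as in the logistic regression cost above.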

  @Override
  public DoubleMatrix apply(DoubleMatrix matrix) {
    DoubleMatrix newInstance = newInstance(matrix);
    if (matrix.isSparse()) {
      // if we have a sparse matrix, it is more efficient to loop over the
      // sparse row vectors
      int[] rows = matrix.rowIndices();
      for (int row : rows) {
        DoubleVector rowVector = matrix.getRowVector(row);
        if (rowVector.getLength() > 0) {
          DoubleVector apply = apply(rowVector);
          newInstance.setRowVector(row, apply);
        }
      }
    } else {
      // on dense matrices we can be faster by directly looping over the items
      for (int i = 0; i < matrix.getRowCount(); i++) {
        for (int j = 0; j < matrix.getColumnCount(); j++) {
          newInstance.set(i, j, apply(matrix.get(i, j)));
        }
      }
    }
    return newInstance;
  }
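A sketch of applying a concrete activation function over a matrix of
pre-activations; treat the class name SigmoidActivationFunction as an
assumption of this example:

  DoubleMatrix z = new DenseDoubleMatrix(new double[][] {
      { -1d, 0d }, { 1d, 2d } });
  // applies sigmoid(x) = 1 / (1 + exp(-x)) to every entry
  DoubleMatrix a = new SigmoidActivationFunction().apply(z);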

  @Override
  public DoubleMatrix gradient(DoubleMatrix matrix) {
    DoubleMatrix newInstance = newInstance(matrix);
    if (matrix.isSparse()) {
      // if we have a sparse matrix, it is more efficient to loop over the
      // sparse column vectors
      int[] columnIndices = matrix.columnIndices();
      for (int col : columnIndices) {
        newInstance.setColumnVector(col, gradient(matrix.getColumnVector(col)));
      }
    } else {
      // on dense matrices we can be faster by directly looping over the items
      for (int i = 0; i < matrix.getRowCount(); i++) {
        for (int j = 0; j < matrix.getColumnCount(); j++) {
          newInstance.set(i, j, gradient(matrix.get(i, j)));
        }
      }
    }
    return newInstance;
  }
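For the sigmoid activation, for instance, the per-entry gradient is

    g'(x) = \sigma(x)\,\bigl(1 - \sigma(x)\bigr)

which is the quantity the backpropagation code above consumes via
conf.activations[i].gradient(zx[i]).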
