Package org.jblas

Examples of org.jblas.DoubleMatrix$ColumnsAsListView


  protected void gradientUpdateMiniBatch(SGDTrainConfig config, DoubleMatrix x_samples, DoubleMatrix y_samples, SGDParam curr_param) {
    int nbr_sample = x_samples.rows;
    BPParam curr_pbparam = (BPParam)curr_param;
    DoubleMatrix[] activation = new DoubleMatrix[curr_pbparam.nl];
    DoubleMatrix[] l_bias = new DoubleMatrix[curr_pbparam.nl];
    DoubleMatrix avg_hidden = null;
   
    /**
     * feedforward
     */
    activation[0] = x_samples;
    for(int i = 1; i < curr_pbparam.nl; i++) {
      activation[i] = activation[i - 1].mmul(curr_pbparam.w[i - 1].transpose()).addiRowVector(curr_pbparam.b[i - 1]);
      MathUtil.sigmod(activation[i]); // in-place logistic activation (project helper)
    }
    // average hidden activation per unit, needed for the sparsity penalty
    if(config.isForceSparsity()) {
      avg_hidden = activation[1].columnSums().divi(nbr_sample);
    }
   
    /**
     * backward
     */
    // 1) delta of the output layer: the reconstruction target is the input,
    //    so the error is (a - x) * a * (1 - a) for a sigmoid output
    DoubleMatrix ai = activation[curr_pbparam.nl - 1];
    l_bias[curr_pbparam.nl - 1] = ai.sub(x_samples).muli(ai).muli(ai.neg().addi(1));
   
    // 2) propagate deltas back (no delta is needed for the input layer)
    for(int i = curr_pbparam.nl - 2; i >= 1; i--) {
      l_bias[i] = l_bias[i + 1].mmul(curr_pbparam.w[i]);
      if(config.isForceSparsity()) {
        // KL-divergence sparsity term: beta * ((1-rho)/(1-rho_k) - rho/rho_k)
        DoubleMatrix sparsity_v = avg_hidden.dup();
        for(int k = 0; k < sparsity_v.columns; k++) {
          double roat = config.getSparsity();    // target sparsity rho
          double roat_k = sparsity_v.get(0, k);  // observed average activation rho_k
          sparsity_v.put(0, k, config.getSparsityBeta()*((1-roat)/(1-roat_k) - roat/roat_k));
        }
        l_bias[i].addiRowVector(sparsity_v);
      }
      ai = activation[i];
      l_bias[i].muli(ai).muli(ai.neg().addi(1)); // multiply by the sigmoid derivative
    }
   
    /**
     * delta
     */
    for(int i = 0; i < curr_pbparam.w.length; i++) {
      DoubleMatrix delta_wi = l_bias[i + 1].transpose().mmul(activation[i]).divi(nbr_sample);
      if(config.isUseRegularization()) {
        // for BP only L2 regularization is applied
        if(0 != config.getLamada2()) {
          delta_wi.addi(curr_pbparam.w[i].mul(config.getLamada2()));
        }
      }
      curr_pbparam.w[i].subi(delta_wi.muli(config.getLearningRate()));
    }
    for(int i = 0; i < curr_pbparam.b.length; i++) {
      DoubleMatrix delta_bi = l_bias[i + 1].columnSums().divi(nbr_sample);
      curr_pbparam.b[i].subi(delta_bi.transpose().muli(config.getLearningRate()));
    }
  }
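The feedforward line relies on a few jblas conventions: samples are stored one per row, weights as (next layer x previous layer) so the input is multiplied by the transpose, and addiRowVector broadcasts the bias across every row. A minimal, self-contained sketch of the same step in plain jblas (made-up sizes, no project classes):

  import org.jblas.DoubleMatrix;
  import org.jblas.MatrixFunctions;

  public class FeedforwardSketch {
      public static void main(String[] args) {
          DoubleMatrix x = DoubleMatrix.rand(4, 3); // 4 samples, 3 visible units
          DoubleMatrix w = DoubleMatrix.rand(2, 3); // 2 hidden units: (hidden x visible)
          DoubleMatrix b = DoubleMatrix.rand(1, 2); // one bias per hidden unit

          // same convention as the snippet: x * w^T, then bias added to every row
          DoubleMatrix a = x.mmul(w.transpose()).addiRowVector(b);
          // logistic activation: 1 / (1 + exp(-a))
          DoubleMatrix h = MatrixFunctions.exp(a.neg()).add(1.0).rdiv(1.0);
          System.out.println(h);
      }
  }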

  @Override
  protected void gradientUpdateCG(SGDTrainConfig config, DoubleMatrix x_samples, DoubleMatrix y_samples, SGDParam curr_param) {
    DoubleMatrix curr_w = ((HiddenLayerParam) curr_param).w;
    DoubleMatrix curr_hbias = ((HiddenLayerParam) curr_param).hbias;
    DoubleMatrix curr_vbias = ((HiddenLayerParam) curr_param).vbias;

    dAOptimizer daopt = new dAOptimizer(config, x_samples, n_visible, n_hidden, curr_w, curr_hbias, curr_vbias);
    MyConjugateGradient cg = new MyConjugateGradient(daopt, config.getCgInitStepSize());
    cg.setTolerance(config.getCgTolerance());
    try {
      // the optimize call itself is elided in the original listing; a plausible
      // completion, assuming a mallet-style optimizer and a hypothetical
      // getCgMaxIterations() accessor on the config:
      cg.optimize(config.getCgMaxIterations());
    } catch (Throwable e) {
      logger.error("", e);
    }
  }

  /**
   * Masking corruption for a denoising autoencoder: each nonzero entry of the
   * (assumed binary) input is kept with probability p and zeroed otherwise.
   */
  private DoubleMatrix get_corrupted_input(DoubleMatrix x, double p) {
    DoubleMatrix ret = new DoubleMatrix(x.getRows(), x.getColumns());
    for (int i = 0; i < x.getRows(); i++) {
      for (int j = 0; j < x.getColumns(); j++) {
        if (0 != x.get(i, j)) {
          ret.put(i, j, MathUtil.binomial(1, p)); // Bernoulli(p) draw
        }
      }
    }
    return ret;
  }
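MathUtil.binomial is project code that the snippet does not show; presumably binomial(n, p) draws from a binomial distribution, so binomial(1, p) is a single Bernoulli trial. A hypothetical stand-in under that assumption:

  // Hypothetical sketch of MathUtil.binomial: n independent trials,
  // each succeeding with probability p; binomial(1, p) yields 0 or 1.
  public static int binomial(int n, double p) {
      int count = 0;
      for (int i = 0; i < n; i++) {
          if (Math.random() < p) {
              count++;
          }
      }
      return count;
  }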
    /**
     * Convert the x vectors of a sample list into a jblas matrix,
     * one sample per row.
     */
    public static DoubleMatrix convertX2Matrix(List<SampleVector> samples) {
        int rows = samples.size();
        int cols = samples.get(0).getX().length;

        DoubleMatrix ret = new DoubleMatrix(rows, cols);
        int row = 0;
        for (SampleVector sample : samples) {
            double[] x = sample.getX();
            for (int col = 0; col < x.length; col++) {
                ret.put(row, col, x[col]);
            }
            row++;
        }
        return ret;
    }
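For comparison, jblas can build the same matrix with putRow, which keeps the dimension variables and loop counters separate; a sketch equivalent to the method above:

  // Equivalent sketch using putRow: each sample's x vector becomes one row.
  // new DoubleMatrix(double[]) yields a column vector, hence the transpose.
  public static DoubleMatrix toMatrix(List<SampleVector> samples) {
      DoubleMatrix ret = new DoubleMatrix(samples.size(), samples.get(0).getX().length);
      for (int i = 0; i < samples.size(); i++) {
          ret.putRow(i, new DoubleMatrix(samples.get(i).getX()).transpose());
      }
      return ret;
  }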
    /**
     * Convert the y vectors of a sample list into a jblas matrix,
     * one sample per row.
     */
    public static DoubleMatrix convertY2Matrix(List<SampleVector> samples) {
        int rows = samples.size();
        int cols = samples.get(0).getY().length;

        DoubleMatrix ret = new DoubleMatrix(rows, cols);
        int row = 0;
        for (SampleVector sample : samples) {
            double[] y = sample.getY();
            for (int col = 0; col < y.length; col++) {
                ret.put(row, col, y[col]);
            }
            row++;
        }
        return ret;
    }
            // tail of getValue(): the conjugate-gradient optimizer maximizes,
            // so the reconstruction loss is returned negated
            return -loss;
        }

        @Override
        public void getValueGradient(double[] arg) {
            // error terms: L_vbias is the visible-layer (reconstruction) error;
            // L_hbias backpropagates it through w and the sigmoid derivative
            DoubleMatrix L_vbias = my_samples.sub(z);
            DoubleMatrix L_hbias = L_vbias.mmul(my_w.transpose()).muli(y).muli(y.neg().addi(1));
            DoubleMatrix delta_w = null;
            if (myConfig.isDoCorruption()) {
                delta_w = L_hbias.transpose().mmul(tilde_x).addi(y.transpose().mmul(L_vbias));
            } else {
                delta_w = L_hbias.transpose().mmul(my_samples).addi(y.transpose().mmul(L_vbias));
            }
            if (myConfig.isUseRegularization()) {
                // only L2 regularization is used for the autoencoder
                if (0 != myConfig.getLamada2()) {
                    delta_w.subi(my_w.mul(myConfig.getLamada2()));
                }
            }
            delta_w.divi(nbr_sample);
            DoubleMatrix delta_hbias = L_hbias.columnSums().divi(nbr_sample);
            DoubleMatrix delta_vbias = L_vbias.columnSums().divi(nbr_sample);

            int idx = 0;
            for (int i = 0; i < n_hidden; i++) {
                for (int j = 0; j < n_visible; j++) {
                    arg[idx++] = delta_w.get(i, j);
                }
            }
            for (int i = 0; i < n_hidden; i++) {
                arg[idx++] = delta_hbias.get(0, i);
            }
            for (int i = 0; i < n_visible; i++) {
                arg[idx++] = delta_vbias.get(0, i);
            }
        }
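The flat arg array is filled with delta_w row by row, then delta_hbias, then delta_vbias. A sketch of the inverse mapping, as a hypothetical helper with the same ordering:

  // Hypothetical inverse of the packing loops above: rebuild (w, hbias, vbias)
  // from a flat array laid out as w row by row, then hbias, then vbias.
  static void unpack(double[] arg, DoubleMatrix w, DoubleMatrix hbias, DoubleMatrix vbias) {
      int idx = 0;
      for (int i = 0; i < w.rows; i++)
          for (int j = 0; j < w.columns; j++)
              w.put(i, j, arg[idx++]);
      for (int i = 0; i < hbias.columns; i++)
          hbias.put(0, i, arg[idx++]);
      for (int i = 0; i < vbias.columns; i++)
          vbias.put(0, i, arg[idx++]);
  }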
 

      /**
       * backward
       */
      // 1) delta of the output layer (the reconstruction target is the input, activation[0])
      DoubleMatrix ai = activation[my_bpparam.nl - 1];
      l_bias[my_bpparam.nl - 1] = ai.sub(activation[0]).muli(ai).muli(ai.neg().addi(1));
     
      // 2) propagate deltas back (no delta is needed for the input layer)
      for(int i = my_bpparam.nl - 2; i >= 1; i--) {
        l_bias[i] = l_bias[i + 1].mmul(my_bpparam.w[i]);
        if(my_config.isForceSparsity()) {
          DoubleMatrix sparsity_v = avg_hidden.dup();
          for(int k = 0; k < sparsity_v.columns; k++) {
            double roat = my_config.getSparsity();    // target sparsity rho
            double roat_k = sparsity_v.get(0, k);     // observed average activation rho_k
            sparsity_v.put(0, k, my_config.getSparsityBeta()*((1-roat)/(1-roat_k) - roat/roat_k));
          }
          l_bias[i].addiRowVector(sparsity_v);
        }
        ai = activation[i];
        l_bias[i].muli(ai).muli(ai.neg().addi(1));
      }
     
      /**
       * delta
       */
      int idx = 0;
      for(int i = 0; i < my_bpparam.w.length; i++) {
        DoubleMatrix delta_wi = l_bias[i + 1].transpose().mmul(activation[i]).divi(nbr_samples);
        if(my_config.isUseRegularization()) {
          // for BP only L2 regularization is applied
          if(0 != my_config.getLamada2()) {
            delta_wi.addi(my_bpparam.w[i].mul(my_config.getLamada2()));
          }
        }
        for(int row = 0; row < delta_wi.rows; row++) {
          for(int col = 0; col < delta_wi.columns; col++) {
            // negated: the optimizer maximizes getValue = -loss
            arg[idx++] = -delta_wi.get(row, col);
          }
        }
      }
      for(int i = 0; i < my_bpparam.b.length; i++) {
        DoubleMatrix delta_bi = l_bias[i + 1].columnSums().divi(nbr_samples);
        for(int row = 0; row < delta_bi.rows; row++) {
          for(int col = 0; col < delta_bi.columns; col++) {
            arg[idx++] = -delta_bi.get(row, col);
          }
        }
      }
    }

    /**
     * Input layer to hidden layer sigmoid output (standalone).
     * @param input input layer matrix
     * @return hidden layer output matrix
     */
    public final DoubleMatrix sigmod_output(DoubleMatrix input) {
        DoubleMatrix ret = input.mmul(hlparam.w.transpose()).addiRowVector(hlparam.hbias);
        MathUtil.sigmod(ret);
        return ret;
    }
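MathUtil.sigmod (the project's spelling) appears throughout these snippets but is never shown; from its call sites it mutates the matrix in place. A minimal sketch under that assumption:

  // Presumed behavior of MathUtil.sigmod: in-place logistic activation,
  // using jblas linear indexing over all elements.
  public static void sigmod(DoubleMatrix m) {
      for (int i = 0; i < m.length; i++) {
          m.put(i, 1.0 / (1.0 + Math.exp(-m.get(i))));
      }
  }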

      // merge step: move vbias toward the new replica's value by
      // 1/nrModelReplica of the difference
      hlparam.vbias.addi(new_hlparam.vbias.sub(hlparam.vbias).divi(nrModelReplica));
  }
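Only the vbias line of the merge survives in this fragment; presumably w and hbias are merged the same way, each parameter moving toward the replica's value by 1/nrModelReplica of the gap (an assumption, mirroring the line above):

  // Presumed full merge, mirroring the vbias update shown in the fragment
  hlparam.w.addi(new_hlparam.w.sub(hlparam.w).divi(nrModelReplica));
  hlparam.hbias.addi(new_hlparam.hbias.sub(hlparam.hbias).divi(nrModelReplica));
  hlparam.vbias.addi(new_hlparam.vbias.sub(hlparam.vbias).divi(nrModelReplica));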
   
    @Override
    protected final double loss(List<SampleVector> samples) {
        DoubleMatrix x = MathUtil.convertX2Matrix(samples);
        DoubleMatrix reconstruct_x = reconstruct(x);
        // total squared reconstruction error over the batch
        return MatrixFunctions.powi(reconstruct_x.sub(x), 2).sum();
    }
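loss() sums squared error over every entry of the batch; dividing by the row count gives a per-sample figure, which is easier to compare across batch sizes. A small helper sketch using the same jblas calls:

  // Sketch: mean squared reconstruction error per sample (assumes the
  // reconstructed matrix has the same shape as x).
  static double meanLoss(DoubleMatrix x, DoubleMatrix reconstructed) {
      return MatrixFunctions.powi(reconstructed.sub(x), 2).sum() / x.rows;
  }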
