Package: edu.ucla.sspace.vector

Examples of edu.ucla.sspace.vector.SparseDoubleVector


    /**
     * {@inheritDoc}
     */
    public SparseDoubleVector getRowVector(int row) {
        checkIndices(row, 0);
        SparseDoubleVector vector = new SparseHashDoubleVector(values.length);
        vector.set(row, values[row]);
        return vector;
    }
View Full Code Here


                // --- Step 1: accumulate Hprime = W^T * A --------------------
                // A (here `matrix`) is read row-by-row so only its non-zero
                // entries contribute: Hprime[k][m] += W[n][k] * A[n][m].
                long start = System.currentTimeMillis();
                Matrix Hprime = new ArrayMatrix(H.rows(), H.columns());
                double s = 0; // NOTE(review): never read in this fragment; looks dead
                for (int k = 0; k < numDimensions; ++k) {
                    for (int n = 0; n < matrix.rows(); ++n) {
                        SparseDoubleVector v = matrix.getRowVector(n);
                        int[] nonZeros = v.getNonZeroIndices();
                        for (int m : nonZeros)
                            Hprime.set(k, m, Hprime.get(k, m) +
                                             W.get(n,k) * v.get(m));
                    }
                }
                long end = System.currentTimeMillis();
                LOG.info("Step 1: " + (end-start) + "ms");

                // Compute WtW using standard matrix multiplication:
                //   WtW[k][l] = sum_n W[n][k] * W[n][l].
                // The result is only numDimensions x numDimensions, so a
                // dense triple loop is affordable here.
                start = System.currentTimeMillis();
                Matrix WtW = new ArrayMatrix(numDimensions, numDimensions);
                for (int k = 0; k < numDimensions; ++k) {
                    for (int l = 0; l < numDimensions; ++l) {
                        double sum = 0;
                        for (int n = 0; n < W.rows(); ++n)
                            sum += W.get(n, k) * W.get(n, l);
                        WtW.set(k, l, sum);
                    }
                }
                end = System.currentTimeMillis();
                LOG.info("Step 2: " + (end-start) + "ms");

                // Compute the final update to H which is
                // H <- H .* (WtA) ./ (WtWH).
                //
                // Do this by computing each cell of WtWH and then let
                //   v <- Hprime[k, m]
                //   w <- H[k, m]
                //   sum <- WtWH[k, m]
                //   Hprime[k,m] <- w * v / sum
                // This saves us from ever storing WtWH in memory.  We can
                // store the updated values in Hprime because we only access
                // each cell once, but we cannot use H itself since we need to
                // maintain those values until every value of WtWH is computed.
                start = System.currentTimeMillis();
                for (int k = 0; k < numDimensions; ++k) {
                    for (int m = 0; m < H.columns(); ++m) {
                        // sum is one cell of WtW * H, i.e. WtWH[k][m].
                        double sum = 0;
                        for (int l = 0; l < numDimensions; ++l)
                            sum += WtW.get(k, l) * H.get(l, m);
                        double v = Hprime.get(k, m);
                        double w = H.get(k, m);
                        // NOTE(review): if sum == 0 this stores NaN/Infinity;
                        // multiplicative-update NMF typically adds a small
                        // epsilon to the denominator — confirm intended.
                        Hprime.set(k, m, w * v / sum);

                    }
                }
                end = System.currentTimeMillis();
                LOG.info("Step 3: " + (end-start) + "ms");

                // Update H with the new value.
                H = Hprime;
            }

            LOG.info("Updating W matrix");
            // Update the W matrix by holding the H matrix fixed for a few
            // iterations.  (The original comment said "H matrix" here, but
            // this loop builds Wprime and therefore updates W.)
            for (int j = 0; j < innerLoop; ++j) {
                // Compute Wprime, which is AHt:
                //   Wprime[n][k] = sum_m A[n][m] * H[k][m].
                // Since A is the left matrix, we can take advantage of its
                // sparsity using the standard matrix multiplication
                // techniques.
                long start = System.currentTimeMillis();
                Matrix Wprime = new ArrayMatrix(W.rows(), W.columns());
                for (int n = 0; n < matrix.rows(); ++ n) {
                    SparseDoubleVector v = matrix.getRowVector(n);
                    int[] nonZeros = v.getNonZeroIndices();
                    for (int k = 0; k < numDimensions; ++k) {
                        double sum = 0;
                        for (int m : nonZeros)
                            sum += v.get(m) * H.get(k, m);
                        Wprime.set(n, k, sum);
                    }
                }
                long end = System.currentTimeMillis();
                LOG.info("Step 4: " + (end-start) + "ms");
View Full Code Here

        public void processSpace(Properties properties) {
            SparseMatrix cleanedMatrix = (SparseMatrix) transform.transform(
                    cooccurrenceMatrix);
            for (String term : basis.keySet()) {
                int index = basis.getDimension(term);
                SparseDoubleVector sdv = cleanedMatrix.getRowVector(index);

                double score = 0;
                for (int i : sdv.getNonZeroIndices())
                    score += sdv.get(i);

                wordScores.put(term, score);
            }
        }
View Full Code Here

        public void processSpace(Properties properties) {
            SparseMatrix cleanedMatrix = (SparseMatrix) transform.transform(
                    cooccurrenceMatrix);
            for (String term : basis.keySet()) {
                int index = basis.getDimension(term);
                SparseDoubleVector sdv = cleanedMatrix.getRowVector(index);

                double score = 0;
                for (int i : sdv.getNonZeroIndices())
                    score += sdv.get(i);

                wordScores.put(term, score);
            }
        }
View Full Code Here

     * itself, which reflects the similarity of the keystone node.
     */
    private <E extends WeightedEdge> SparseDoubleVector getVertexWeightVector(
            WeightedGraph<E> g, int vertex) {
        if (keepWeightVectors) {
            // Double-checked cache lookup: read without locking first.
            // NOTE(review): unless vertexToWeightVector is a concurrent map,
            // this unsynchronized first read may be racy — confirm its type.
            SparseDoubleVector weightVec = vertexToWeightVector.get(vertex);
            if (weightVec == null) {
                synchronized(this) {
                    // Re-check under the lock in case another thread filled
                    // the cache entry while we were waiting.
                    weightVec = vertexToWeightVector.get(vertex);
                    if (weightVec == null) {
                        weightVec = computeWeightVector(g, vertex);
View Full Code Here

    }

    private <E extends WeightedEdge> SparseDoubleVector computeWeightVector(
            WeightedGraph<E> g, int vertex) {

        SparseDoubleVector weightVec = new CompactSparseVector();//  g.order());
        Set<E> adjacent = g.getAdjacencyList(vertex);
       
        // Count how many neighbors have positive edge weights
        // (assume for now that all edges are weighted positive)
        double normalizer = 1d / adjacent.size();
       
        // For each of the neighbors, normalize the positive edge
        // weights by the number of neighbors (with pos. weights)
        for (E e : adjacent) {
            int v = (e.from() == vertex) ? e.to() : e.from();
            weightVec.set(v, normalizer * e.weight());
        }                   
       
        // Last, although the graph is assumed to not have self-loops, the
        // weight for an node to itself is the normalizing constant (1/num
        // positive weights).  This is analogous to the similarity contribution
        // from the keystone node in the unweighted version
        weightVec.set(vertex, normalizer);
        return weightVec;
    }
View Full Code Here

TOP

Related Classes of edu.ucla.sspace.vector.SparseDoubleVector

Copyright © 2018 www.massapi.com. All rights reserved.
All source code are property of their respective owners. Java is a trademark of Sun Microsystems, Inc and owned by ORACLE Inc. Contact coftware#gmail.com.