Package com.google.common.hash

Examples of com.google.common.hash.Hasher



    @Override
    public int hashCode() {
        HashFunction hf = Hashing.goodFastHash(32);
        Hasher h = hf.newHasher();
        h.putInt(slots.size());
        for (int i = 0; i < slots.size(); i++) {
            // Length-prefix each inner list so different groupings hash distinctly.
            h.putInt(slots.get(i).size());
            // Bound the inner loop by the inner list's size, not the outer list's.
            for (int j = 0; j < slots.get(i).size(); j++) {
                h.putBytes(slots.get(i).get(j).getLowerRange());
                h.putBytes(slots.get(i).get(j).getUpperRange());
            }
        }
        return h.hash().asInt();
    }
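Hashing.goodFastHash(32) returns a fast, non-cryptographic hash function whose seed is not guaranteed to be stable across JVM runs, which suits an in-memory hashCode() like the one above but rules out persisting the values. A minimal sketch of that caveat:

import com.google.common.hash.HashFunction;
import com.google.common.hash.Hashing;

public class GoodFastHashDemo {
    public static void main(String[] args) {
        // The function is seeded per JVM run: this prints the same value for
        // the lifetime of the process, but possibly a different one after a restart.
        HashFunction hf = Hashing.goodFastHash(32);
        System.out.println(hf.newHasher().putInt(42).hash().asInt());
    }
}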



    @Override
    public int hashCode() {
        if (goodHashCode == null) {
            // 'L' is a type tag that keeps this class's hashes distinct from
            // other hashed types sharing the same content bytes.
            Hasher hash = hasher.newHasher().putChar('L').putString(getContent(), StandardCharsets.UTF_8);
            if (getLocale() != null) {
                hash.putString(getLocale().toString(), StandardCharsets.UTF_8);
            }
            if (getType() != null) {
                hash.putString(getType().stringValue(), StandardCharsets.UTF_8);
            }
            // Cache the HashCode: computing it is comparatively expensive.
            goodHashCode = hash.hash();
        }
        return goodHashCode.hashCode();
    }
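The putChar('L') prefix above acts as a type tag. That matters because a Hasher simply concatenates the bytes it is fed, so without tags or length prefixes, logically different inputs can produce identical hashes. A small sketch of the pitfall (class name is illustrative):

import static java.nio.charset.StandardCharsets.UTF_8;

import com.google.common.hash.HashCode;
import com.google.common.hash.HashFunction;
import com.google.common.hash.Hashing;

public class ConcatCollisionDemo {
    public static void main(String[] args) {
        HashFunction hf = Hashing.murmur3_128();
        // Both hashers consume the identical byte stream "abc", so the hashes
        // are equal even though ("ab", "c") and ("a", "bc") differ logically.
        HashCode first = hf.newHasher().putString("ab", UTF_8).putString("c", UTF_8).hash();
        HashCode second = hf.newHasher().putString("a", UTF_8).putString("bc", UTF_8).hash();
        System.out.println(first.equals(second)); // true
    }
}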

    @Override
    protected ObjectId _call() {
        Preconditions.checkState(object != null, "Object has not been set.");

        final Hasher hasher = ObjectId.HASH_FUNCTION.newHasher();
        @SuppressWarnings("unchecked")
        final Funnel<RevObject> funnel = (Funnel<RevObject>) FUNNELS[object.getType().value()];
        funnel.funnel(object, hasher);
        final byte[] rawKey = hasher.hash().asBytes();
        final ObjectId id = ObjectId.createNoClone(rawKey);

        return id;
    }
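The FUNNELS table above dispatches to a Funnel, Guava's interface for decomposing an object into primitive values. A minimal sketch of one, assuming a hypothetical Person type with getId() and getName():

import static java.nio.charset.StandardCharsets.UTF_8;

import com.google.common.hash.Funnel;
import com.google.common.hash.PrimitiveSink;

public class PersonFunnel implements Funnel<Person> {
    // Feed each field into the sink in a fixed order so that equal objects
    // always produce the same byte stream.
    @Override
    public void funnel(Person person, PrimitiveSink into) {
        into.putInt(person.getId())
            .putString(person.getName(), UTF_8);
    }
}

A funnel can then be applied either through a Hasher, as the snippet does, or in one step with HashFunction.hashObject, e.g. Hashing.murmur3_128().hashObject(person, new PersonFunnel()).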

    private HashCode getPartitionHash(PartitionContext partitionContext) {
        // The precise implementation of this method isn't particularly important.  There are lots of ways we can hash
        // the data in the PartitionContext.  It just needs to be deterministic and to take into account the values in
        // the PartitionContext for the configured partition keys.
        Hasher hasher = Hashing.md5().newHasher();
        boolean empty = true;
        if (_partitionKeys.isEmpty()) {
            // Use the default context.
            Object value = partitionContext.get();
            if (value != null) {
                hasher.putString(value.toString(), StandardCharsets.UTF_8);
                empty = false;
            }
        }
        for (String partitionKey : _partitionKeys) {
            Object value = partitionContext.get(partitionKey);
            if (value != null) {
                // Include both the key and value in the hash so "reviewId" of 1 and "reviewerId" of 1 hash differently.
                hasher.putString(partitionKey, StandardCharsets.UTF_8);
                hasher.putString(value.toString(), StandardCharsets.UTF_8);
                empty = false;
            }
        }
        if (empty) {
            // When the partition context has no relevant values that means we should ignore the partition context and
            // don't filter the end points based on partition.  Return null to indicate this.
            return null;
        }
        return hasher.hash();
    }
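Once a partition hash like the one above exists, Guava's consistent-hashing helper can map it onto a fixed number of buckets with minimal reshuffling when the bucket count changes. A sketch, with the bucket count as a caller-chosen parameter:

import com.google.common.hash.HashCode;
import com.google.common.hash.Hashing;

public class PartitionRouter {
    // Maps a partition hash onto one of numBuckets buckets. consistentHash
    // relocates only about 1/numBuckets of the keys when the count grows by
    // one, unlike a plain modulo, which reshuffles nearly everything.
    static int bucketFor(HashCode partitionHash, int numBuckets) {
        return Hashing.consistentHash(partitionHash, numBuckets);
    }
}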

        // randomly generated ranges to each end point.  The individual ranges may vary widely in size, but, with
        // sufficient # of entries per end point, the overall amount of data assigned to each server tends to even out
        // with minimal variation (256 entries per server yields roughly 5% variation in server load).
        List<Integer> list = Lists.newArrayListWithCapacity(_entriesPerEndPoint);
        for (int i = 0; list.size() < _entriesPerEndPoint; i++) {
            Hasher hasher = Hashing.md5().newHasher();
            hasher.putInt(i);
            hasher.putString(endPointId, StandardCharsets.UTF_8);
            ByteBuffer buf = ByteBuffer.wrap(hasher.hash().asBytes());
            while (buf.hasRemaining() && list.size() < _entriesPerEndPoint) {
                list.add(buf.getInt());
            }
        }
        return list;
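MD5 produces 128 bits, so each pass through the outer loop yields up to four ints, and 256 entries per endpoint cost 64 hash computations. A quick demonstration of that expansion:

import java.nio.ByteBuffer;

import com.google.common.hash.Hashing;

public class HashExpansionDemo {
    public static void main(String[] args) {
        // One MD5 hash is 16 bytes, i.e. exactly four ints.
        ByteBuffer buf = ByteBuffer.wrap(
                Hashing.md5().newHasher().putInt(0).hash().asBytes());
        while (buf.hasRemaining()) {
            System.out.println(buf.getInt()); // prints four values
        }
    }
}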


    @Test
    public void testZoneKeyData()
            throws Exception
    {
        Hasher hasher = Hashing.murmur3_128().newHasher();

        SortedSet<TimeZoneKey> timeZoneKeysSortedByKey = ImmutableSortedSet.copyOf(new Comparator<TimeZoneKey>()
        {
            @Override
            public int compare(TimeZoneKey left, TimeZoneKey right)
            {
                return Short.compare(left.getKey(), right.getKey());
            }
        }, TimeZoneKey.getTimeZoneKeys());

        for (TimeZoneKey timeZoneKey : timeZoneKeysSortedByKey) {
            hasher.putShort(timeZoneKey.getKey());
            hasher.putString(timeZoneKey.getId(), StandardCharsets.UTF_8);
        }
        // Zone file should not (normally) change, so make any change hard to miss.
        assertEquals(hasher.hash().asLong(), -3500128963570093374L, "zone-index.properties file contents changed!");
    }
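This golden-value test works because murmur3_128, unlike goodFastHash, is specified to produce identical output across JVMs and Guava releases. The same pattern in miniature; the recorded constant would come from a known-good run, not be invented:

import static java.nio.charset.StandardCharsets.UTF_8;

import com.google.common.hash.HashCode;
import com.google.common.hash.Hashing;

public class GoldenHashDemo {
    public static void main(String[] args) {
        // Stable across runs: record this once, then assert it in the test so
        // any change to the input data fails loudly.
        HashCode fingerprint = Hashing.murmur3_128()
                .hashString("zone-index contents", UTF_8);
        System.out.println(fingerprint.asLong());
    }
}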

    if (buf.length() == 0) {
      return null;
    }

    LOG.debug("Creating jar file for coprocessor classes: " + buf.toString());
    final Hasher hasher = Hashing.md5().newHasher();
    final byte[] buffer = new byte[COPY_BUFFER_SIZE];

    final Map<String, URL> dependentClasses = new HashMap<String, URL>();
    for (Class<? extends Coprocessor> clz : classes) {
      Dependencies.findClassDependencies(clz.getClassLoader(), new Dependencies.ClassAcceptor() {
        @Override
        public boolean accept(String className, final URL classUrl, URL classPathUrl) {
          // Assuming the endpoint and protocol classes don't have dependencies
          // other than those that come with HBase and Java.
          if (className.startsWith("co.cask")) {
            if (!dependentClasses.containsKey(className)) {
              dependentClasses.put(className, classUrl);
            }
            return true;
          }
          return false;
        }
      }, clz.getName());
    }

    if (!dependentClasses.isEmpty()) {
      LOG.debug("Adding " + dependentClasses.size() + " classes to jar");
      File jarFile = File.createTempFile(filePrefix, ".jar");
      try {
        JarOutputStream jarOutput = null;
        try {
          jarOutput = new JarOutputStream(new FileOutputStream(jarFile));
          for (Map.Entry<String, URL> entry : dependentClasses.entrySet()) {
            try {
              jarOutput.putNextEntry(new JarEntry(entry.getKey().replace('.', File.separatorChar) + ".class"));
              InputStream inputStream = entry.getValue().openStream();

              try {
                int len = inputStream.read(buffer);
                while (len >= 0) {
                  hasher.putBytes(buffer, 0, len);
                  jarOutput.write(buffer, 0, len);
                  len = inputStream.read(buffer);
                }
              } finally {
                inputStream.close();
              }
            } catch (IOException e) {
              LOG.info("Error writing to jar", e);
              throw Throwables.propagate(e);
            }
          }
        } finally {
          if (jarOutput != null) {
            jarOutput.close();
          }
        }

        // Copy jar file into HDFS
        // Target path is the jarDir + jarMD5.jar
        final Location targetPath = jarDir.append("coprocessor" + hasher.hash().toString() + ".jar");

        // If the file exists and has the same size, assume the file hasn't changed
        if (targetPath.exists() && targetPath.length() == jarFile.length()) {
          return targetPath;
        }
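The snippet interleaves hashing with writing so the jar is read only once. When a second read is acceptable, Guava's io package can produce the same MD5 digest in a single call; a sketch:

import java.io.File;
import java.io.IOException;

import com.google.common.hash.HashCode;
import com.google.common.hash.Hashing;
import com.google.common.io.Files;

public class FileDigestDemo {
    // Computes the MD5 of a file's contents in one streaming pass.
    static HashCode md5Of(File file) throws IOException {
        return Files.asByteSource(file).hash(Hashing.md5());
    }
}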

    /**
     * Use hops rather than stops because:
     * a) arrival at stop zero and departure from last stop are irrelevant
     * b) this hash function needs to stay stable when users switch from 0.10.x to 1.0
     */
    public HashCode semanticHash(HashFunction hashFunction) {
        Hasher hasher = hashFunction.newHasher();
        for (int hop = 0; hop < getNumStops() - 1; hop++) {
            hasher.putInt(getScheduledDepartureTime(hop));
            hasher.putInt(getScheduledArrivalTime(hop + 1));
        }
        return hasher.hash();
    }
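The caller supplies the hash function, and the stability requirement in point (b) effectively restricts that choice to seed-stable functions such as murmur3. A hypothetical call site, assuming the surrounding class is named TripTimes:

import com.google.common.hash.HashCode;
import com.google.common.hash.Hashing;

public class TripFingerprint {
    // murmur3_128 is stable across JVM runs, satisfying requirement (b) above.
    static HashCode fingerprintOf(TripTimes tripTimes) {
        return tripTimes.semanticHash(Hashing.murmur3_128());
    }
}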

     * semantic StopPattern, and we don't want to calculate complicated hashes or equality values during normal
     * execution. However, in some cases we want a way to consistently identify trips across versions of a GTFS feed, when the
     * feed publisher cannot ensure stable trip IDs. Therefore we define some additional hash functions.
     */
    public HashCode semanticHash(HashFunction hashFunction) {
        Hasher hasher = hashFunction.newHasher();
        for (int s = 0; s < size; s++) {
            Stop stop = stops[s];
            // Truncate the lat and lon to 6 decimal places in case they move slightly between feed versions
            hasher.putLong((long) (stop.getLat() * 1000000));
            hasher.putLong((long) (stop.getLon() * 1000000));
        }
        // Use hops rather than stops because drop-off at stop 0 and pick-up at last stop are not important
        // and have changed between OTP versions.
        for (int hop = 0; hop < size - 1; hop++) {
            hasher.putInt(pickups[hop]);
            hasher.putInt(dropoffs[hop + 1]);
        }
        return hasher.hash();
    }
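The (long)(lat * 1000000) cast keeps six decimal places, roughly 0.1 m of latitude, and discards the rest, so coordinates that drift by less than that between feed versions still hash identically. A quick check of the arithmetic:

public class QuantizeDemo {
    public static void main(String[] args) {
        // Both inputs truncate to 45123456: drift below the sixth decimal
        // place does not change the semantic hash.
        System.out.println((long) (45.1234561 * 1_000_000)); // 45123456
        System.out.println((long) (45.1234567 * 1_000_000)); // 45123456
    }
}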
