Package com.google.common.base

Examples of com.google.common.base.Stopwatch
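
Guava's Stopwatch measures elapsed time with a Ticker (System.nanoTime() by default) rather than the wall clock, which suits the micro-timing shown in the project snippets below. As a primer, here is a minimal, self-contained sketch of the core API; the Stopwatch calls are Guava's real methods, while the demo class and the sleeps are our own scaffolding:

import com.google.common.base.Stopwatch;
import java.util.concurrent.TimeUnit;

public class StopwatchDemo {
    public static void main(String[] args) throws InterruptedException {
        // Guava 15+ factory; older code below writes "new Stopwatch().start()".
        Stopwatch watch = Stopwatch.createStarted();

        Thread.sleep(50); // stand-in for the work being timed

        watch.stop(); // stop() returns the Stopwatch, so calls chain
        System.out.println("first run: " + watch.elapsed(TimeUnit.MILLISECONDS) + " ms");

        // reset() zeroes the elapsed time and leaves the stopwatch stopped;
        // the jclouds tests below use exactly this reset().start() pattern
        // to time a second, cached call with the same stopwatch.
        watch.reset().start();
        Thread.sleep(10);
        System.out.println("second run: " + watch.elapsed(TimeUnit.MICROSECONDS) + " us");
    }
}

The first snippet appears to be from Apache Cassandra (note ByteBufferUtil): a session constructor that starts a stopwatch as its final step so the session's lifetime can be measured later.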


        this.coordinator = coordinator;
        this.sessionId = sessionId;
        this.isLocallyOwned = locallyOwned;
        sessionIdBytes = ByteBufferUtil.bytes(sessionId);
        watch = new Stopwatch(); // constructor deprecated since Guava 15; Stopwatch.createUnstarted() replaces it
        watch.start();           // or combine both steps with Stopwatch.createStarted()
    }
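
The next two snippets appear to be from jclouds' ImplicitOptionalConverter tests. Each times an initial, uncached call, then uses reset().start() to time a second call against the same stopwatch, and asserts that the cached lookup is faster.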



   public void testCacheIsFasterWhenNoAnnotationPresent() {
      InvocationSuccess keyPairApi = getKeyPairApi();
      ImplicitOptionalConverter fn = forApiVersion("2011-07-15");
      Stopwatch watch = new Stopwatch().start(); // pre-Guava-15 idiom; later releases use Stopwatch.createStarted()
      fn.apply(keyPairApi);
      long first = watch.stop().elapsed(TimeUnit.MICROSECONDS);
      watch.reset().start();
      fn.apply(keyPairApi);
      long cached = watch.stop().elapsed(TimeUnit.MICROSECONDS);
      assertTrue(cached < first, String.format("cached [%s] should be less than initial [%s]", cached, first));
      Logger.getAnonymousLogger().info(
            "lookup cache saved " + (first - cached) + " microseconds when no annotation present");
   }
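
The companion test covers the case where the annotation is present; aside from the variable name and the log message, it follows the same measure-reset-measure pattern.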


   public void testCacheIsFasterWhenAnnotationPresent() {
      InvocationSuccess floatingIpApi = getKeyPairApi(); // sic in the original source; the helper name does not match the variable
      ImplicitOptionalConverter fn = forApiVersion("2011-07-15");
      Stopwatch watch = new Stopwatch().start();
      fn.apply(floatingIpApi);
      long first = watch.stop().elapsed(TimeUnit.MICROSECONDS);
      watch.reset().start();
      fn.apply(floatingIpApi);
      long cached = watch.stop().elapsed(TimeUnit.MICROSECONDS);
      assertTrue(cached < first, String.format("cached [%s] should be less than initial [%s]", cached, first));
      Logger.getAnonymousLogger().info(
            "lookup cache saved " + (first - cached) + " microseconds when annotation present");

   }
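
This fragment appears to be from Apache Jackrabbit Oak's off-heap document cache. It already uses the Stopwatch.createStarted() factory, and it records the elapsed nanoseconds of a successful lookup in a cache-statistics counter while counting misses separately.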

    /**
     * Retrieves the NodeDocument for the given key from the off-heap cache.
     *
     * @param key the key to look up
     * @param invalidateAfterRetrieve set it to true if the entry from off heap cache has
     *                                to be invalidated. This would be the case when value loaded is
     *                                made part of L1 cache
     */
    private NodeDocument retrieve(Object key, boolean invalidateAfterRetrieve) {
        Stopwatch watch = Stopwatch.createStarted();

        NodeDocReference value = offHeapCache.getIfPresent(key);
        if (value == null) {
            statsCounter.recordMisses(1);
            return null;
        }

        NodeDocument result = value.getDocument();
        if (result != null) {
            statsCounter.recordLoadSuccess(watch.elapsed(TimeUnit.NANOSECONDS));
        } else {
            statsCounter.recordMisses(1);
        }

        if (invalidateAfterRetrieve) {
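
The next four snippets appear to be from Apache Hadoop's FileInputFormat classes, first the new mapreduce API and then the old mapred API. This one times listStatus(), which either lists sequentially or fans out to a multi-threaded LocatedFileStatusFetcher, and logs the elapsed milliseconds at debug level.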

   
    List<FileStatus> result = null;

    int numThreads = job.getConfiguration().getInt(LIST_STATUS_NUM_THREADS,
        DEFAULT_LIST_STATUS_NUM_THREADS);
    Stopwatch sw = new Stopwatch().start();
    if (numThreads == 1) {
      result = singleThreadedListStatus(job, dirs, inputFilter, recursive);
    } else {
      Iterable<FileStatus> locatedFiles = null;
      try {
        LocatedFileStatusFetcher locatedFileStatusFetcher = new LocatedFileStatusFetcher(
            job.getConfiguration(), dirs, recursive, inputFilter, true);
        locatedFiles = locatedFileStatusFetcher.getFileStatuses();
      } catch (InterruptedException e) {
        throw new IOException("Interrupted while getting file statuses");
      }
      result = Lists.newArrayList(locatedFiles);
    }
   
    sw.stop();
    if (LOG.isDebugEnabled()) {
      LOG.debug("Time taken to get FileStatuses: " + sw.elapsedMillis());
    }
    LOG.info("Total input paths to process : " + result.size());
    return result;
  }
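
The new-API getSplits() wraps the entire split-generation loop in a stopwatch and logs the number of splits together with the time taken.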

  /**
   * Generate the list of files and make them into FileSplits.
   * @param job the job context
   * @throws IOException
   */
  public List<InputSplit> getSplits(JobContext job) throws IOException {
    Stopwatch sw = new Stopwatch().start();
    long minSize = Math.max(getFormatMinSplitSize(), getMinSplitSize(job));
    long maxSize = getMaxSplitSize(job);

    // generate splits
    List<InputSplit> splits = new ArrayList<InputSplit>();
    List<FileStatus> files = listStatus(job);
    for (FileStatus file: files) {
      Path path = file.getPath();
      long length = file.getLen();
      if (length != 0) {
        BlockLocation[] blkLocations;
        if (file instanceof LocatedFileStatus) {
          blkLocations = ((LocatedFileStatus) file).getBlockLocations();
        } else {
          FileSystem fs = path.getFileSystem(job.getConfiguration());
          blkLocations = fs.getFileBlockLocations(file, 0, length);
        }
        if (isSplitable(job, path)) {
          long blockSize = file.getBlockSize();
          long splitSize = computeSplitSize(blockSize, minSize, maxSize);

          long bytesRemaining = length;
          while (((double) bytesRemaining)/splitSize > SPLIT_SLOP) {
            int blkIndex = getBlockIndex(blkLocations, length-bytesRemaining);
            splits.add(makeSplit(path, length-bytesRemaining, splitSize,
                        blkLocations[blkIndex].getHosts(),
                        blkLocations[blkIndex].getCachedHosts()));
            bytesRemaining -= splitSize;
          }

          if (bytesRemaining != 0) {
            int blkIndex = getBlockIndex(blkLocations, length-bytesRemaining);
            splits.add(makeSplit(path, length-bytesRemaining, bytesRemaining,
                       blkLocations[blkIndex].getHosts(),
                       blkLocations[blkIndex].getCachedHosts()));
          }
        } else { // not splitable
          splits.add(makeSplit(path, 0, length, blkLocations[0].getHosts(),
                      blkLocations[0].getCachedHosts()));
        }
      } else {
        //Create empty hosts array for zero length files
        splits.add(makeSplit(path, 0, length, new String[0]));
      }
    }
    // Save the number of input files for metrics/loadgen
    job.getConfiguration().setLong(NUM_INPUT_FILES, files.size());
    sw.stop();
    if (LOG.isDebugEnabled()) {
      LOG.debug("Total # of splits generated by getSplits: " + splits.size()
          + ", TimeTaken: " + sw.elapsedMillis());
    }
    return splits;
  }
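
The old-API (mapred) listStatus() follows the same timing pattern as its mapreduce counterpart, returning an array of FileStatus instead of a list.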

    int numThreads = job
        .getInt(
            org.apache.hadoop.mapreduce.lib.input.FileInputFormat.LIST_STATUS_NUM_THREADS,
            org.apache.hadoop.mapreduce.lib.input.FileInputFormat.DEFAULT_LIST_STATUS_NUM_THREADS);
   
    Stopwatch sw = new Stopwatch().start();
    if (numThreads == 1) {
      List<FileStatus> locatedFiles = singleThreadedListStatus(job, dirs, inputFilter, recursive);
      result = locatedFiles.toArray(new FileStatus[locatedFiles.size()]);
    } else {
      Iterable<FileStatus> locatedFiles = null;
      try {
       
        LocatedFileStatusFetcher locatedFileStatusFetcher = new LocatedFileStatusFetcher(
            job, dirs, recursive, inputFilter, false);
        locatedFiles = locatedFileStatusFetcher.getFileStatuses();
      } catch (InterruptedException e) {
        throw new IOException("Interrupted while getting file statuses");
      }
      result = Iterables.toArray(locatedFiles, FileStatus.class);
    }

    sw.stop();
    if (LOG.isDebugEnabled()) {
      LOG.debug("Time taken to get FileStatuses: " + sw.elapsedMillis());
    }
    LOG.info("Total input paths to process : " + result.length);
    return result;
  }
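
The old-API getSplits(JobConf, int) again times split generation end to end; unlike the new API, it derives a goal size from the requested number of splits before sizing each split.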

  /** Splits files returned by {@link #listStatus(JobConf)} when
   * they're too big.*/
  public InputSplit[] getSplits(JobConf job, int numSplits)
    throws IOException {
    Stopwatch sw = new Stopwatch().start();
    FileStatus[] files = listStatus(job);
   
    // Save the number of input files for metrics/loadgen
    job.setLong(NUM_INPUT_FILES, files.length);
    long totalSize = 0;                           // compute total size
    for (FileStatus file: files) {                // check we have valid files
      if (file.isDirectory()) {
        throw new IOException("Not a file: "+ file.getPath());
      }
      totalSize += file.getLen();
    }

    long goalSize = totalSize / (numSplits == 0 ? 1 : numSplits);
    long minSize = Math.max(job.getLong(org.apache.hadoop.mapreduce.lib.input.
      FileInputFormat.SPLIT_MINSIZE, 1), minSplitSize);

    // generate splits
    ArrayList<FileSplit> splits = new ArrayList<FileSplit>(numSplits);
    NetworkTopology clusterMap = new NetworkTopology();
    for (FileStatus file: files) {
      Path path = file.getPath();
      long length = file.getLen();
      if (length != 0) {
        FileSystem fs = path.getFileSystem(job);
        BlockLocation[] blkLocations;
        if (file instanceof LocatedFileStatus) {
          blkLocations = ((LocatedFileStatus) file).getBlockLocations();
        } else {
          blkLocations = fs.getFileBlockLocations(file, 0, length);
        }
        if (isSplitable(fs, path)) {
          long blockSize = file.getBlockSize();
          long splitSize = computeSplitSize(goalSize, minSize, blockSize);

          long bytesRemaining = length;
          while (((double) bytesRemaining)/splitSize > SPLIT_SLOP) {
            String[][] splitHosts = getSplitHostsAndCachedHosts(blkLocations,
                length-bytesRemaining, splitSize, clusterMap);
            splits.add(makeSplit(path, length-bytesRemaining, splitSize,
                splitHosts[0], splitHosts[1]));
            bytesRemaining -= splitSize;
          }

          if (bytesRemaining != 0) {
            String[][] splitHosts = getSplitHostsAndCachedHosts(blkLocations, length
                - bytesRemaining, bytesRemaining, clusterMap);
            splits.add(makeSplit(path, length - bytesRemaining, bytesRemaining,
                splitHosts[0], splitHosts[1]));
          }
        } else {
          String[][] splitHosts = getSplitHostsAndCachedHosts(blkLocations,0,length,clusterMap);
          splits.add(makeSplit(path, 0, length, splitHosts[0], splitHosts[1]));
        }
      } else {
        //Create empty hosts array for zero length files
        splits.add(makeSplit(path, 0, length, new String[0]));
      }
    }
    sw.stop();
    if (LOG.isDebugEnabled()) {
      LOG.debug("Total # of splits generated by getSplits: " + splits.size()
          + ", TimeTaken: " + sw.elapsedMillis());
    }
    return splits.toArray(new FileSplit[splits.size()]);
  }
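
This snippet appears to be from Guava's own LocalCache. Note that on the failure paths the stopwatch is read while still running (elapsed time can be read without stopping), and stop() is called only once a non-null result has come back from the loader.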

  /**
   * Returns the result of calling {@link CacheLoader#loadAll}, or null if {@code loader} doesn't
   * implement {@code loadAll}.
   */
  @Nullable
  Map<K, V> loadAll(Set<? extends K> keys, CacheLoader<? super K, V> loader)
      throws ExecutionException {
    Stopwatch stopwatch = new Stopwatch().start();
    Map<K, V> result;
    boolean success = false;
    try {
      @SuppressWarnings("unchecked") // safe since all keys extend K
      Map<K, V> map = (Map<K, V>) loader.loadAll(keys);
      result = map;
      success = true;
    } catch (UnsupportedLoadingOperationException e) {
      success = true;
      throw e;
    } catch (RuntimeException e) {
      throw new UncheckedExecutionException(e);
    } catch (Exception e) {
      throw new ExecutionException(e);
    } catch (Error e) {
      throw new ExecutionError(e);
    } finally {
      if (!success) {
        globalStatsCounter.recordLoadException(stopwatch.elapsedTime(NANOSECONDS)); // elapsedTime() became elapsed() in later Guava
      }
    }

    if (result == null) {
      globalStatsCounter.recordLoadException(stopwatch.elapsedTime(NANOSECONDS));
      throw new InvalidCacheLoadException(loader + " returned null map from loadAll");
    }

    stopwatch.stop();
    // TODO(fry): batch by segment
    boolean nullsPresent = false;
    for (Map.Entry<K, V> entry : result.entrySet()) {
      K key = entry.getKey();
      V value = entry.getValue();
      if (key == null || value == null) {
        // delay failure until non-null entries are stored
        nullsPresent = true;
      } else {
        put(key, value);
      }
    }

    if (nullsPresent) {
      globalStatsCounter.recordLoadException(stopwatch.elapsedTime(NANOSECONDS));
      throw new InvalidCacheLoadException(loader + " returned null keys or values from loadAll");
    }

    // TODO(fry): record count of loaded entries
    globalStatsCounter.recordLoadSuccess(stopwatch.elapsedTime(NANOSECONDS));
    return result;
  }
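
The final snippet appears to be from Presto's query verifier. Here a running stopwatch shrinks the timeout budget: the time already spent executing the statement is subtracted from the overall timeout before the remainder is handed to TimeLimiter.callWithTimeout().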

            connection.setSchema(query.getSchema());
            long start = System.nanoTime();

            try (Statement statement = connection.createStatement()) {
                TimeLimiter limiter = new SimpleTimeLimiter(); // later Guava deprecates this constructor in favor of SimpleTimeLimiter.create(ExecutorService)
                Stopwatch stopwatch = Stopwatch.createStarted();
                Statement limitedStatement = limiter.newProxy(statement, Statement.class, timeout.toMillis(), TimeUnit.MILLISECONDS);
                try (final ResultSet resultSet = limitedStatement.executeQuery(query.getQuery())) {
                    List<List<Object>> results = limiter.callWithTimeout(getResultSetConverter(resultSet), timeout.toMillis() - stopwatch.elapsed(TimeUnit.MILLISECONDS), TimeUnit.MILLISECONDS, true);
                    return new QueryResult(State.SUCCESS, null, nanosSince(start), results);
                }
                catch (AssertionError e) {
                    if (e.getMessage().startsWith("unimplemented type:")) {
                        return new QueryResult(State.INVALID, null, null, ImmutableList.<List<Object>>of());
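
A closing note on API drift, since several snippets above predate Guava 15: new Stopwatch() became Stopwatch.createUnstarted(), new Stopwatch().start() became Stopwatch.createStarted(), elapsedMillis() became elapsed(TimeUnit.MILLISECONDS), and elapsedTime(unit) became elapsed(unit).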


