Package org.apache.accumulo.trace.instrument

Examples of org.apache.accumulo.trace.instrument.Span
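
Most of the snippets below share a common pattern: Trace.start(...) opens a named Span and Span.stop() closes it, usually in a finally block so the Span is stopped even on error; client code additionally brackets the whole operation with Trace.on(...) and Trace.off(). A minimal sketch of that pattern (not taken from the Accumulo source; doWork() is a hypothetical placeholder for application code):

    import org.apache.accumulo.trace.instrument.Span;
    import org.apache.accumulo.trace.instrument.Trace;

    Trace.on("myOperation");          // begin a trace on the current thread
    Span span = Trace.start("step");  // open a named Span within the trace
    try {
      doWork();                       // hypothetical application code
    } finally {
      span.stop();                    // always stop the Span
    }
    Trace.off();                      // end the trace

In the first example, a client-side scan is wrapped in a "scan" Span, with nested "scan:locateTablet" and "scan:location" Spans around tablet location and the per-tablet read; each Span is stopped in a finally block: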


    String error = null;
    int tooManyFilesCount = 0;
   
    List<KeyValue> results = null;
   
    Span span = Trace.start("scan");
    try {
      while (results == null && !scanState.finished) {
       
        if ((System.currentTimeMillis() - startTime) / 1000.0 > timeOut)
          throw new ScanTimedOutException();
       
        while (loc == null) {
          long currentTime = System.currentTimeMillis();
          if ((currentTime - startTime) / 1000.0 > timeOut)
            throw new ScanTimedOutException();
         
          Span locateSpan = Trace.start("scan:locateTablet");
          try {
            loc = TabletLocator.getInstance(instance, scanState.tableName).locateTablet(scanState.startRow, scanState.skipStartRow, false, credentials);
            if (loc == null) {
              if (!Tables.exists(instance, scanState.tableName.toString()))
                throw new TableDeletedException(scanState.tableName.toString());
              else if (Tables.getTableState(instance, scanState.tableName.toString()) == TableState.OFFLINE)
                throw new TableOfflineException(instance, scanState.tableName.toString());
             
              error = "Failed to locate tablet for table : " + scanState.tableName + " row : " + scanState.startRow;
              if (!error.equals(lastError))
                log.debug(error);
              else if (log.isTraceEnabled())
                log.trace(error);
              lastError = error;
              UtilWaitThread.sleep(100);
            } else {
              // when a tablet splits we do want to continue scanning the low child
              // of the split unless we are already past it
              Range dataRange = loc.tablet_extent.toDataRange();
             
              if (scanState.range.getStartKey() != null && dataRange.afterEndKey(scanState.range.getStartKey())) {
                // go to the next tablet
                scanState.startRow = loc.tablet_extent.getEndRow();
                scanState.skipStartRow = true;
                loc = null;
              } else if (scanState.range.getEndKey() != null && dataRange.beforeStartKey(scanState.range.getEndKey())) {
                // should not happen
                throw new RuntimeException("Unexpected tablet, extent : " + loc.tablet_extent + "  range : " + scanState.range + " startRow : "
                    + scanState.startRow);
              }
            }
          } catch (AccumuloServerException e) {
            log.debug("Scan failed, server side exception : " + e.getMessage());
            throw e;
          } catch (AccumuloException e) {
            error = "exception from tablet loc " + e.getMessage();
            if (!error.equals(lastError))
              log.debug(error);
            else if (log.isTraceEnabled())
              log.trace(error);
           
            lastError = error;
            UtilWaitThread.sleep(100);
          } finally {
            locateSpan.stop();
          }
        }
       
        Span scanLocation = Trace.start("scan:location");
        scanLocation.data("tserver", loc.tablet_location);
        try {
          results = scan(loc, scanState, conf);
        } catch (AccumuloSecurityException e) {
          Tables.clearCache(instance);
          if (!Tables.exists(instance, scanState.tableName.toString()))
            throw new TableDeletedException(scanState.tableName.toString());
          throw e;
        } catch (TApplicationException tae) {
          throw new AccumuloServerException(loc.tablet_location, tae);
        } catch (NotServingTabletException e) {
          error = "Scan failed, not serving tablet " + loc;
          if (!error.equals(lastError))
            log.debug(error);
          else if (log.isTraceEnabled())
            log.trace(error);
          lastError = error;
         
          TabletLocator.getInstance(instance, scanState.tableName).invalidateCache(loc.tablet_extent);
          loc = null;
         
          // no need to try the current scan id somewhere else
          scanState.scanID = null;
         
          if (scanState.isolated)
            throw new IsolationException();
         
          UtilWaitThread.sleep(100);
        } catch (NoSuchScanIDException e) {
          error = "Scan failed, no such scan id " + scanState.scanID + " " + loc;
          if (!error.equals(lastError))
            log.debug(error);
          else if (log.isTraceEnabled())
            log.trace(error);
          lastError = error;
         
          if (scanState.isolated)
            throw new IsolationException();
         
          scanState.scanID = null;
        } catch (TooManyFilesException e) {
          error = "Tablet has too many files " + loc + " retrying...";
          if (!error.equals(lastError)) {
            log.debug(error);
            tooManyFilesCount = 0;
          } else {
            tooManyFilesCount++;
            if (tooManyFilesCount == 300)
              log.warn(error);
            else if (log.isTraceEnabled())
              log.trace(error);
          }
          lastError = error;
         
          // not sure what state the scan session on the server side is
          // in after this occurs, so let's be cautious and start a new
          // scan session
          scanState.scanID = null;
         
          if (scanState.isolated)
            throw new IsolationException();
         
          UtilWaitThread.sleep(100);
        } catch (TException e) {
          TabletLocator.getInstance(instance, scanState.tableName).invalidateCache(loc.tablet_location);
          error = "Scan failed, thrift error " + e.getClass().getName() + "  " + e.getMessage() + " " + loc;
          if (!error.equals(lastError))
            log.debug(error);
          else if (log.isTraceEnabled())
            log.trace(error);
          lastError = error;
          loc = null;
         
          // do not want to continue using the same scan id; if a timeout occurred it could cause a batch to be skipped
          // because a thread on the server side may still be processing the timed-out continue scan
          scanState.scanID = null;
         
          if (scanState.isolated)
            throw new IsolationException();
         
          UtilWaitThread.sleep(100);
        } finally {
          scanLocation.stop();
        }
      }
     
      if (results != null && results.size() == 0 && scanState.finished) {
        results = null;
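
The next example traces a tablet's minor compaction: Trace.on("minorCompaction") begins the trace, and successive Spans named "waitForCommits", "start", and "compact" mark each phase of the task: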


    }
   
    public void run() {
      minorCompactionWaitingToStart = false;
      minorCompactionInProgress = true;
      Span minorCompaction = Trace.on("minorCompaction");
      try {
        String newMapfileLocation = getNextMapFilename(mergeFile == null ? "F" : "M");
        Span span = Trace.start("waitForCommits");
        synchronized (Tablet.this) {
          commitSession.waitForCommitsToFinish();
        }
        span.stop();
        span = Trace.start("start");
        while (true) {
          try {
            // the purpose of the minor compaction start event is to keep track of the filename... in the case
            // where the metadata table write for the minor compaction finishes and the process dies before
            // writing the minor compaction finish event, then the start event+filename in metadata table will
            // prevent recovery of duplicate data... the minor compaction start event could be written at any time
            // before the metadata write for the minor compaction
            tabletServer.minorCompactionStarted(commitSession, commitSession.getWALogSeq() + 1, newMapfileLocation);
            break;
          } catch (IOException e) {
            log.warn("Failed to write to write ahead log " + e.getMessage(), e);
          }
        }
        span.stop();
        span = Trace.start("compact");
        this.stats = minorCompact(conf, fs, tabletMemory.getMinCMemTable(), newMapfileLocation + "_tmp", newMapfileLocation, mergeFile, true, queued,
            commitSession, flushId, mincReason);
        span.stop();
       
        if (needsSplit()) {
          tabletServer.executeSplit(Tablet.this);
        } else {
          initiateMajorCompaction(MajorCompactionReason.NORMAL);
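
A client-side write example: Trace.on("Client Write") starts the trace around a BatchWriter, and a separate Span brackets the flush: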

    // the write operation occurs asynchronously. You can optionally create additional Spans
    // within a given Trace, as seen below around the flush.
    Trace.on("Client Write");

    batchWriter.addMutation(m);
    Span flushSpan = Trace.start("Client Flush");
    batchWriter.flush();
    flushSpan.stop();

    // Use Trace.offNoFlush() if you don't want the operation to block.
    Trace.off();

    batchWriter.close();
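
As the comment above notes, Trace.off() may block while trace data is sent. A minimal variant sketch, reusing the hypothetical batchWriter and mutation m from the snippet above, that ends the trace without blocking:

    Trace.on("Client Write");
    batchWriter.addMutation(m);   // batchWriter and m as in the snippet above
    batchWriter.flush();
    Trace.offNoFlush();           // end the trace without waiting for trace data to be sent
    batchWriter.close();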
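
The companion read example traces iteration over a Scanner and attaches the number of entries read to the Span as extra metadata: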

  private void readEntries(Opts opts) throws TableNotFoundException, AccumuloException, AccumuloSecurityException {

    Scanner scanner = opts.getConnector().createScanner(opts.getTableName(), opts.auths);

    // Trace the read operation.
    Span readSpan = Trace.on("Client Read");

    int numberOfEntriesRead = 0;
    for (Entry<Key,Value> entry : scanner) {
      System.out.println(entry.getKey().toString() + " -> " + entry.getValue().toString());
      ++numberOfEntriesRead;
    }
    // You can add additional metadata (key-value pairs) to Spans, which can then be viewed in the Monitor.
    readSpan.data("Number of Entries Read", String.valueOf(numberOfEntriesRead));

    Trace.off();
  }
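
In the tablet server's batch update path, separate "prep", "wal", and "commit" Spans surround preparing the mutations, writing the write-ahead log, and committing them: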

          containsMetadataTablet = true;
     
      if (!containsMetadataTablet && us.queuedMutations.size() > 0)
        TabletServer.this.resourceManager.waitUntilCommitsAreEnabled();
     
      Span prep = Trace.start("prep");
      for (Entry<Tablet,? extends List<Mutation>> entry : us.queuedMutations.entrySet()) {
       
        Tablet tablet = entry.getKey();
        List<Mutation> mutations = entry.getValue();
        if (mutations.size() > 0) {
          try {
            if (updateMetrics.isEnabled())
              updateMetrics.add(TabletServerUpdateMetrics.mutationArraySize, mutations.size());
           
            CommitSession commitSession = tablet.prepareMutationsForCommit(us.cenv, mutations);
            if (commitSession == null) {
              if (us.currentTablet == tablet) {
                us.currentTablet = null;
              }
              us.failures.put(tablet.getExtent(), us.successfulCommits.get(tablet));
            } else {
              sendables.put(commitSession, mutations);
              mutationCount += mutations.size();
            }
           
          } catch (TConstraintViolationException e) {
            us.violations.add(e.getViolations());
            if (updateMetrics.isEnabled())
              updateMetrics.add(TabletServerUpdateMetrics.constraintViolations, 0);
           
            if (e.getNonViolators().size() > 0) {
              // only log and commit mutations if there were some that did not
              // violate constraints... this is what prepareMutationsForCommit() expects
              sendables.put(e.getCommitSession(), e.getNonViolators());
            }
           
            mutationCount += mutations.size();
           
          } catch (HoldTimeoutException t) {
            error = t;
            log.debug("Giving up on mutations due to a long memory hold time");
            break;
          } catch (Throwable t) {
            error = t;
            log.error("Unexpected error preparing for commit", error);
            break;
          }
        }
      }
      prep.stop();
     
      Span wal = Trace.start("wal");
      long pt2 = System.currentTimeMillis();
      long avgPrepareTime = (long) ((pt2 - pt1) / (double) us.queuedMutations.size());
      us.prepareTimes.addStat(pt2 - pt1);
      if (updateMetrics.isEnabled())
        updateMetrics.add(TabletServerUpdateMetrics.commitPrep, (avgPrepareTime));
     
      if (error != null) {
        for (Entry<CommitSession,List<Mutation>> e : sendables.entrySet()) {
          e.getKey().abortCommit(e.getValue());
        }
        throw new RuntimeException(error);
      }
      try {
        while (true) {
          try {
            long t1 = System.currentTimeMillis();
           
            logger.logManyTablets(sendables);
           
            long t2 = System.currentTimeMillis();
            us.walogTimes.addStat(t2 - t1);
            if (updateMetrics.isEnabled())
              updateMetrics.add(TabletServerUpdateMetrics.waLogWriteTime, (t2 - t1));
           
            break;
          } catch (IOException ex) {
            log.warn("logging mutations failed, retrying");
          } catch (Throwable t) {
            log.error("Unknown exception logging mutations, counts for mutations in flight not decremented!", t);
            throw new RuntimeException(t);
          }
        }
       
        wal.stop();
       
        Span commit = Trace.start("commit");
        long t1 = System.currentTimeMillis();
        for (Entry<CommitSession,? extends List<Mutation>> entry : sendables.entrySet()) {
          CommitSession commitSession = entry.getKey();
          List<Mutation> mutations = entry.getValue();
         
          commitSession.commit(mutations);
         
          Tablet tablet = commitSession.getTablet();
         
          if (tablet == us.currentTablet) {
            // because constraint violations may filter out some mutations,
            // for proper accounting with the client code, need to increment
            // the count based on the original number of mutations from the
            // client, NOT the filtered number
            us.successfulCommits.increment(tablet, us.queuedMutations.get(tablet).size());
          }
        }
        long t2 = System.currentTimeMillis();
       
        long avgCommitTime = (long) ((t2 - t1) / (double) sendables.size());
       
        us.flushTime += (t2 - pt1);
        us.commitTimes.addStat(t2 - t1);
       
        if (updateMetrics.isEnabled())
          updateMetrics.add(TabletServerUpdateMetrics.commitTime, avgCommitTime);
        commit.stop();
      } finally {
        us.queuedMutations.clear();
        if (us.currentTablet != null) {
          us.queuedMutations.put(us.currentTablet, new ArrayList<Mutation>());
        }
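
The single-mutation update path uses the same three Spans ("prep", "wal", "commit") around one mutation: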

     
      try {
        Mutation mutation = new ServerMutation(tmutation);
        List<Mutation> mutations = Collections.singletonList(mutation);
       
        Span prep = Trace.start("prep");
        CommitSession cs = tablet.prepareMutationsForCommit(new TservConstraintEnv(security, credentials), mutations);
        prep.stop();
        if (cs == null) {
          throw new NotServingTabletException(tkeyExtent);
        }
       
        while (true) {
          try {
            Span wal = Trace.start("wal");
            logger.log(cs, cs.getWALogSeq(), mutation);
            wal.stop();
            break;
          } catch (IOException ex) {
            log.warn(ex, ex);
          }
        }
       
        Span commit = Trace.start("commit");
        cs.commit(mutations);
        commit.stop();
      } catch (TConstraintViolationException e) {
        throw new ConstraintViolationException(Translator.translate(e.getViolations().asList(), Translators.CVST));
      } finally {
        writeTracker.finishWrite(opid);
      }
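
Write-ahead log garbage collection times its phases with "scanServers", "removeMetadataEntries", and "removeFiles" Spans: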

      this.trash = new Trash(fs, fs.getConf());
  }

  public void collect(GCStatus status) {
   
    Span span = Trace.start("scanServers");
    try {
     
      Set<String> sortedWALogs = getSortedWALogs();

      status.currentLog.started = System.currentTimeMillis();
     
      Map<String,String> fileToServerMap = new HashMap<String,String>();
      int count = scanServers(fileToServerMap);
      long fileScanStop = System.currentTimeMillis();
      log.info(String.format("Fetched %d files from %d servers in %.2f seconds", fileToServerMap.size(), count,
          (fileScanStop - status.currentLog.started) / 1000.));
      status.currentLog.candidates = fileToServerMap.size();
      span.stop();
     
      span = Trace.start("removeMetadataEntries");
      try {
        count = removeMetadataEntries(fileToServerMap, sortedWALogs, status);
      } catch (Exception ex) {
        log.error("Unable to scan metadata table", ex);
        return;
      } finally {
        span.stop();
      }
     
      long logEntryScanStop = System.currentTimeMillis();
      log.info(String.format("%d log entries scanned in %.2f seconds", count, (logEntryScanStop - fileScanStop) / 1000.));
     
      span = Trace.start("removeFiles");
      Map<String,ArrayList<String>> serverToFileMap = mapServersToFiles(fileToServerMap);
     
      count = removeFiles(serverToFileMap, sortedWALogs, status);
     
      long removeStop = System.currentTimeMillis();
      log.info(String.format("%d total logs removed from %d servers in %.2f seconds", count, serverToFileMap.size(), (removeStop - logEntryScanStop) / 1000.));
      status.currentLog.finished = removeStop;
      status.lastLog = status.currentLog;
      status.currentLog = new GcCycleStats();
      span.stop();
     
    } catch (Exception e) {
      log.error("exception occured while garbage collecting write ahead logs", e);
      span.stop();
    }
  }
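
A compaction wraps the whole locality group in a "compact" Span and nests a "write" Span around the append loop: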

  }
 
  private void compactLocalityGroup(String lgName, Set<ByteSequence> columnFamilies, boolean inclusive, FileSKVWriter mfw, CompactionStats majCStats)
      throws IOException, CompactionCanceledException {
    ArrayList<FileSKVIterator> readers = new ArrayList<FileSKVIterator>(filesToCompact.size());
    Span span = Trace.start("compact");
    try {
      long entriesCompacted = 0;
      List<SortedKeyValueIterator<Key,Value>> iters = openMapDataFiles(lgName, readers);
     
      if (imm != null) {
        iters.add(imm.compactionIterator());
      }
     
      CountingIterator citr = new CountingIterator(new MultiIterator(iters, extent.toDataRange()));
      DeletingIterator delIter = new DeletingIterator(citr, propogateDeletes);
      ColumnFamilySkippingIterator cfsi = new ColumnFamilySkippingIterator(delIter);
     

      // if(env.getIteratorScope() )
     
      TabletIteratorEnvironment iterEnv;
      if (env.getIteratorScope() == IteratorScope.majc)
        iterEnv = new TabletIteratorEnvironment(IteratorScope.majc, !propogateDeletes, acuTableConf);
      else if (env.getIteratorScope() == IteratorScope.minc)
        iterEnv = new TabletIteratorEnvironment(IteratorScope.minc, acuTableConf);
      else
        throw new IllegalArgumentException();
     
      SortedKeyValueIterator<Key,Value> itr = iterEnv.getTopLevelIterator(IteratorUtil.loadIterators(env.getIteratorScope(), cfsi, extent, acuTableConf,
          iterators, iterEnv));
     
      itr.seek(extent.toDataRange(), columnFamilies, inclusive);
     
      if (!inclusive) {
        mfw.startDefaultLocalityGroup();
      } else {
        mfw.startNewLocalityGroup(lgName, columnFamilies);
      }
     
      Span write = Trace.start("write");
      try {
        while (itr.hasTop() && env.isCompactionEnabled()) {
          mfw.append(itr.getTopKey(), itr.getTopValue());
          itr.next();
          entriesCompacted++;
         
          if (entriesCompacted % 1024 == 0) {
            // Periodically update stats; do not want to do this too often since it's volatile
            entriesWritten.addAndGet(1024);
          }
        }

        if (itr.hasTop() && !env.isCompactionEnabled()) {
          // cancel major compaction operation
          try {
            try {
              mfw.close();
            } catch (IOException e) {
              log.error(e, e);
            }
            fs.delete(new Path(outputFile), true);
          } catch (Exception e) {
            log.warn("Failed to delete Canceled compaction output file " + outputFile, e);
          }
          throw new CompactionCanceledException();
        }
       
      } finally {
        CompactionStats lgMajcStats = new CompactionStats(citr.getCount(), entriesCompacted);
        majCStats.add(lgMajcStats);
        write.stop();
      }
     
    } finally {
      // close sequence files opened
      for (FileSKVIterator reader : readers) {
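
The last two examples create a Span from previously captured trace information using Trace.trace(tinfo, description), here around a repo's isReady method: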

    tinfo = Tracer.traceInfo();
  }
 
  @Override
  public long isReady(long tid, T environment) throws Exception {
    Span span = Trace.trace(tinfo, repo.getDescription());
    try {
      return repo.isReady(tid, environment);
    } finally {
      span.stop();
    }
  }
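
The repo's call method is traced the same way, and a non-null result is wrapped in a new TraceRepo so later steps can be traced as well: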

    }
  }
 
  @Override
  public Repo<T> call(long tid, T environment) throws Exception {
    Span span = Trace.trace(tinfo, repo.getDescription());
    try {
      Repo<T> result = repo.call(tid, environment);
      if (result == null)
        return result;
      return new TraceRepo<T>(result);
    } finally {
      span.stop();
    }
  }
