Examples of StartupProgress
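
StartupProgress tracks the NameNode's startup phases (LOADING_FSIMAGE, LOADING_EDITS, SAVING_CHECKPOINT, SAFEMODE) so that progress can be reported while the NameNode comes up. The pattern visible throughout the excerpts below is: begin a phase or step, set the expected total, obtain a Counter and increment it as work completes, then end the step or phase.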


Examples of org.apache.hadoop.hdfs.server.namenode.startupprogress.StartupProgress
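
Incrementing the SAFEMODE counter for the STEP_AWAITING_REPORTED_BLOCKS step as blocks reach their safe replication count. The counter is fetched lazily and only touched while the phase is still incomplete, so the bookkeeping stops once startup has finished.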

    private synchronized void incrementSafeBlockCount(short replication) {
      if (replication == safeReplication) {
        this.blockSafe++;

        // Report startup progress only if we haven't completed startup yet.
        StartupProgress prog = NameNode.getStartupProgress();
        if (prog.getStatus(Phase.SAFEMODE) != Status.COMPLETE) {
          if (this.awaitingReportedBlocksCounter == null) {
            this.awaitingReportedBlocksCounter = prog.getCounter(Phase.SAFEMODE,
              STEP_AWAITING_REPORTED_BLOCKS);
          }
          this.awaitingReportedBlocksCounter.increment();
        }
        // ... remainder of method elided in this excerpt ...
      }
    }

Examples of org.apache.hadoop.hdfs.server.namenode.startupprogress.StartupProgress
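
Initializing progress for edit-log replay: the number of transactions in the stream becomes the LOADING_EDITS step total, and a Counter is obtained for the per-transaction loop that follows.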

   
    long expectedTxId = expectedStartingTxId;
    long numEdits = 0;
    long lastTxId = in.getLastTxId();
    long numTxns = (lastTxId - expectedStartingTxId) + 1;
    StartupProgress prog = NameNode.getStartupProgress();
    Step step = createStartupProgressStep(in);
    prog.setTotal(Phase.LOADING_EDITS, step, numTxns);
    Counter counter = prog.getCounter(Phase.LOADING_EDITS, step);
    long lastLogTime = now();
    long lastInodeId = fsNamesys.getLastInodeId();
   
    try {
      while (true) {
        // ... per-transaction replay loop elided in this excerpt ...

Examples of org.apache.hadoop.hdfs.server.namenode.startupprogress.StartupProgress
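
Bracketing the replay of one edits file with beginStep/endStep; endStep sits in the finally block, so the step is closed even if loading fails.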

  /**
   * This is where we apply edits that we've been writing to disk all
   * along.
   */
  long loadFSEdits(EditLogInputStream edits, long expectedStartingTxId,
      MetaRecoveryContext recovery) throws IOException {
    StartupProgress prog = NameNode.getStartupProgress();
    Step step = createStartupProgressStep(edits);
    prog.beginStep(Phase.LOADING_EDITS, step);
    fsNamesys.writeLock();
    try {
      long startTime = now();
      FSImage.LOG.info("Start loading edits file " + edits.getName());
      long numEdits = loadEditRecords(edits, false,
                                 expectedStartingTxId, recovery);
      FSImage.LOG.info("Edits file " + edits.getName()
          + " of size " + edits.length() + " edits # " + numEdits
          + " loaded in " + (now()-startTime)/1000 + " seconds");
      return numEdits;
    } finally {
      edits.close();
      fsNamesys.writeUnlock();
      prog.endStep(Phase.LOADING_EDITS, step);
    }
  }

Examples of org.apache.hadoop.hdfs.server.namenode.startupprogress.StartupProgress
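
Loading an fsimage file: a Step of type INODES is opened under LOADING_FSIMAGE, the total is set once the inode count has been read from the image header, and the Counter is threaded through the inode-loading routines. After endStep the count is pinned to the total to correct for reference inodes that were not counted individually.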

    void load(File curFile) throws IOException {
      checkNotLoaded();
      assert curFile != null : "curFile is null";

      StartupProgress prog = NameNode.getStartupProgress();
      Step step = new Step(StepType.INODES);
      prog.beginStep(Phase.LOADING_FSIMAGE, step);
      long startTime = now();

      //
      // Load in bits
      //
      MessageDigest digester = MD5Hash.getDigester();
      DigestInputStream fin = new DigestInputStream(
           new FileInputStream(curFile), digester);

      DataInputStream in = new DataInputStream(fin);
      try {
        // read image version: first appeared in version -1
        int imgVersion = in.readInt();
        if (getLayoutVersion() != imgVersion) {
          throw new InconsistentFSStateException(curFile,
              "imgVersion " + imgVersion +
              " expected to be " + getLayoutVersion());
        }
        boolean supportSnapshot = LayoutVersion.supports(Feature.SNAPSHOT,
            imgVersion);

        // read namespaceID: first appeared in version -2
        in.readInt();

        long numFiles = in.readLong();

        // read in the last generation stamp for legacy blocks.
        long genstamp = in.readLong();
        namesystem.setGenerationStampV1(genstamp);
       
        if (LayoutVersion.supports(Feature.SEQUENTIAL_BLOCK_ID, imgVersion)) {
          // read the starting generation stamp for sequential block IDs
          genstamp = in.readLong();
          namesystem.setGenerationStampV2(genstamp);

          // read the last generation stamp for blocks created after
          // the switch to sequential block IDs.
          long stampAtIdSwitch = in.readLong();
          namesystem.setGenerationStampV1Limit(stampAtIdSwitch);

          // read the max sequential block ID.
          long maxSequentialBlockId = in.readLong();
          namesystem.setLastAllocatedBlockId(maxSequentialBlockId);
        } else {
          long startingGenStamp = namesystem.upgradeGenerationStampToV2();
          // This is an upgrade.
          LOG.info("Upgrading to sequential block IDs. Generation stamp " +
                   "for new blocks set to " + startingGenStamp);
        }

        // read the transaction ID of the last edit represented by
        // this image
        if (LayoutVersion.supports(Feature.STORED_TXIDS, imgVersion)) {
          imgTxId = in.readLong();
        } else {
          imgTxId = 0;
        }

        // read the last allocated inode id in the fsimage
        if (LayoutVersion.supports(Feature.ADD_INODE_ID, imgVersion)) {
          long lastInodeId = in.readLong();
          namesystem.resetLastInodeId(lastInodeId);
          if (LOG.isDebugEnabled()) {
            LOG.debug("load last allocated InodeId from fsimage:" + lastInodeId);
          }
        } else {
          if (LOG.isDebugEnabled()) {
            LOG.debug("Old layout version doesn't have inode id."
                + " Will assign new id for each inode.");
          }
        }
       
        if (supportSnapshot) {
          snapshotMap = namesystem.getSnapshotManager().read(in, this);
        }

        // read compression related info
        FSImageCompression compression;
        if (LayoutVersion.supports(Feature.FSIMAGE_COMPRESSION, imgVersion)) {
          compression = FSImageCompression.readCompressionHeader(conf, in);
        } else {
          compression = FSImageCompression.createNoopCompression();
        }
        in = compression.unwrapInputStream(fin);

        LOG.info("Loading image file " + curFile + " using " + compression);
       
        // load all inodes
        LOG.info("Number of files = " + numFiles);
        prog.setTotal(Phase.LOADING_FSIMAGE, step, numFiles);
        Counter counter = prog.getCounter(Phase.LOADING_FSIMAGE, step);
        if (LayoutVersion.supports(Feature.FSIMAGE_NAME_OPTIMIZATION,
            imgVersion)) {
          if (supportSnapshot) {
            loadLocalNameINodesWithSnapshot(numFiles, in, counter);
          } else {
            loadLocalNameINodes(numFiles, in, counter);
          }
        } else {
          loadFullNameINodes(numFiles, in, counter);
        }

        loadFilesUnderConstruction(in, supportSnapshot, counter);
        prog.endStep(Phase.LOADING_FSIMAGE, step);
        // Now that the step is finished, set counter equal to total to adjust
        // for possible under-counting due to reference inodes.
        prog.setCount(Phase.LOADING_FSIMAGE, step, numFiles);

        loadSecretManagerState(in);

        // make sure to read to the end of file
        boolean eof = (in.read() == -1);
        // ... remainder of method elided in this excerpt ...

Examples of org.apache.hadoop.hdfs.server.namenode.startupprogress.StartupProgress
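
The mirror image on the write path: saving a checkpoint sets the step total to the number of items in the directory tree, threads the Counter through the inode-saving routines, and again pins the count to the total after endStep.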

      final FSNamesystem sourceNamesystem = context.getSourceNamesystem();
      FSDirectory fsDir = sourceNamesystem.dir;
      String sdPath = newFile.getParentFile().getParentFile().getAbsolutePath();
      Step step = new Step(StepType.INODES, sdPath);
      StartupProgress prog = NameNode.getStartupProgress();
      prog.beginStep(Phase.SAVING_CHECKPOINT, step);
      prog.setTotal(Phase.SAVING_CHECKPOINT, step,
        fsDir.rootDir.numItemsInTree());
      Counter counter = prog.getCounter(Phase.SAVING_CHECKPOINT, step);
      long startTime = now();
      //
      // Write out data
      //
      MessageDigest digester = MD5Hash.getDigester();
      FileOutputStream fout = new FileOutputStream(newFile);
      DigestOutputStream fos = new DigestOutputStream(fout, digester);
      DataOutputStream out = new DataOutputStream(fos);
      try {
        out.writeInt(HdfsConstants.LAYOUT_VERSION);
        // We use the non-locked version of getNamespaceInfo here since
        // the coordinating thread of saveNamespace already has read-locked
        // the namespace for us. If we attempt to take another readlock
        // from the actual saver thread, there's a potential of a
        // fairness-related deadlock. See the comments on HDFS-2223.
        out.writeInt(sourceNamesystem.unprotectedGetNamespaceInfo()
            .getNamespaceID());
        out.writeLong(fsDir.rootDir.numItemsInTree());
        out.writeLong(sourceNamesystem.getGenerationStampV1());
        out.writeLong(sourceNamesystem.getGenerationStampV2());
        out.writeLong(sourceNamesystem.getGenerationStampAtblockIdSwitch());
        out.writeLong(sourceNamesystem.getLastAllocatedBlockId());
        out.writeLong(context.getTxId());
        out.writeLong(sourceNamesystem.getLastInodeId());

       
        sourceNamesystem.getSnapshotManager().write(out);
       
        // write compression info and set up compressed stream
        out = compression.writeHeaderAndWrapStream(fos);
        LOG.info("Saving image file " + newFile +
                 " using " + compression);

        // save the root
        saveINode2Image(fsDir.rootDir, out, false, referenceMap, counter);
        // save the rest of the nodes
        saveImage(fsDir.rootDir, out, true, counter);
        prog.endStep(Phase.SAVING_CHECKPOINT, step);
        // Now that the step is finished, set counter equal to total to adjust
        // for possible under-counting due to reference inodes.
        prog.setCount(Phase.SAVING_CHECKPOINT, step,
          fsDir.rootDir.numItemsInTree());
        // save files under construction
        sourceNamesystem.saveFilesUnderConstruction(out);
        context.checkCancelled();
        sourceNamesystem.saveSecretManagerState(out, sdPath);
        // ... remainder of method elided in this excerpt ...

Examples of org.apache.hadoop.hdfs.server.namenode.startupprogress.StartupProgress
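
When no namespace save is needed, SAVING_CHECKPOINT is begun and immediately ended, so the phase still shows as complete in the startup-progress report.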

      boolean needToSave =
          fsImage.recoverTransitionRead(startOpt, this, recovery) && !haEnabled;
      if (needToSave) {
        fsImage.saveNamespace(this);
      } else {
        // No need to save, so mark the phase done.
        StartupProgress prog = NameNode.getStartupProgress();
        prog.beginPhase(Phase.SAVING_CHECKPOINT);
        prog.endPhase(Phase.SAVING_CHECKPOINT);
      }
      // This will start a new log segment and write to the seen_txid file, so
      // we shouldn't do it when coming up in standby state
      if (!haEnabled) {
        fsImage.openEditLogForWrite();
      }
      // ... remainder of method elided in this excerpt ...

Examples of org.apache.hadoop.hdfs.server.namenode.startupprogress.StartupProgress
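
Entering safe mode during startup: the SAFEMODE phase is begun and its expected total is the number of complete blocks, which the counter from the first excerpt counts toward.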

    try {
      nnResourceChecker = new NameNodeResourceChecker(conf);
      checkAvailableResources();
      assert safeMode != null &&
        !safeMode.isPopulatingReplQueues();
      StartupProgress prog = NameNode.getStartupProgress();
      prog.beginPhase(Phase.SAFEMODE);
      prog.setTotal(Phase.SAFEMODE, STEP_AWAITING_REPORTED_BLOCKS,
        getCompleteBlocksTotal());
      setBlockTotal();
      blockManager.activate(conf);
    } finally {
      writeUnlock();
    }
    // ... remainder of method elided in this excerpt ...

Examples of org.apache.hadoop.hdfs.server.namenode.startupprogress.StartupProgress
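
Progress reporting while persisting delegation tokens during a checkpoint: the total is the token count, and the counter is incremented once per token written.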

  /**
   * Private helper method to save the current delegation tokens in the fsimage.
   */
  private synchronized void saveCurrentTokens(DataOutputStream out,
      String sdPath) throws IOException {
    StartupProgress prog = NameNode.getStartupProgress();
    Step step = new Step(StepType.DELEGATION_TOKENS, sdPath);
    prog.beginStep(Phase.SAVING_CHECKPOINT, step);
    prog.setTotal(Phase.SAVING_CHECKPOINT, step, currentTokens.size());
    Counter counter = prog.getCounter(Phase.SAVING_CHECKPOINT, step);
    out.writeInt(currentTokens.size());
    Iterator<DelegationTokenIdentifier> iter = currentTokens.keySet()
        .iterator();
    while (iter.hasNext()) {
      DelegationTokenIdentifier id = iter.next();
      id.write(out);
      DelegationTokenInformation info = currentTokens.get(id);
      out.writeLong(info.getRenewDate());
      counter.increment();
    }
    prog.endStep(Phase.SAVING_CHECKPOINT, step);
  }

Examples of org.apache.hadoop.hdfs.server.namenode.startupprogress.StartupProgress
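
The same pattern for delegation keys; the step total must be the number of keys so that it matches the increments in the write loop.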

  /*
   * Save the current state of allKeys
   */
  private synchronized void saveAllKeys(DataOutputStream out, String sdPath)
      throws IOException {
    StartupProgress prog = NameNode.getStartupProgress();
    Step step = new Step(StepType.DELEGATION_KEYS, sdPath);
    prog.beginStep(Phase.SAVING_CHECKPOINT, step);
    // The total must match the number of keys written in the loop below.
    prog.setTotal(Phase.SAVING_CHECKPOINT, step, allKeys.size());
    Counter counter = prog.getCounter(Phase.SAVING_CHECKPOINT, step);
    out.writeInt(allKeys.size());
    Iterator<Integer> iter = allKeys.keySet().iterator();
    while (iter.hasNext()) {
      Integer key = iter.next();
      allKeys.get(key).write(out);
      counter.increment();
    }
    prog.endStep(Phase.SAVING_CHECKPOINT, step);
  }

Examples of org.apache.hadoop.hdfs.server.namenode.startupprogress.StartupProgress
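
The corresponding load path: the token count read from the image becomes the LOADING_FSIMAGE step total, and the counter advances as each token is restored.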

  /**
   * Private helper method to load delegation tokens from the fsimage.
   */
  private synchronized void loadCurrentTokens(DataInput in)
      throws IOException {
    StartupProgress prog = NameNode.getStartupProgress();
    Step step = new Step(StepType.DELEGATION_TOKENS);
    prog.beginStep(Phase.LOADING_FSIMAGE, step);
    int numberOfTokens = in.readInt();
    prog.setTotal(Phase.LOADING_FSIMAGE, step, numberOfTokens);
    Counter counter = prog.getCounter(Phase.LOADING_FSIMAGE, step);
    for (int i = 0; i < numberOfTokens; i++) {
      DelegationTokenIdentifier id = new DelegationTokenIdentifier();
      id.readFields(in);
      long expiryTime = in.readLong();
      addPersistedDelegationToken(id, expiryTime);
      counter.increment();
    }
    prog.endStep(Phase.LOADING_FSIMAGE, step);
  }
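
Taken together, the excerpts show one consistent lifecycle: begin a phase or step, set the expected total, obtain a Counter and increment it per unit of work, then end the step and phase. Below is a minimal, self-contained sketch of that lifecycle. It assumes StartupProgress has a public no-argument constructor; the INODES step type and the total of 100 are arbitrary illustration values, and inside the NameNode the instance would come from NameNode.getStartupProgress() as in the excerpts above.

    import org.apache.hadoop.hdfs.server.namenode.startupprogress.Phase;
    import org.apache.hadoop.hdfs.server.namenode.startupprogress.StartupProgress;
    import org.apache.hadoop.hdfs.server.namenode.startupprogress.StartupProgress.Counter;
    import org.apache.hadoop.hdfs.server.namenode.startupprogress.Step;
    import org.apache.hadoop.hdfs.server.namenode.startupprogress.StepType;

    public class StartupProgressSketch {
      public static void main(String[] args) {
        // Stand-alone instance for illustration; the NameNode code above
        // obtains its instance via NameNode.getStartupProgress().
        StartupProgress prog = new StartupProgress();

        prog.beginPhase(Phase.LOADING_FSIMAGE);
        Step step = new Step(StepType.INODES);
        prog.beginStep(Phase.LOADING_FSIMAGE, step);
        // Expected amount of work; 100 is a made-up value for this sketch.
        prog.setTotal(Phase.LOADING_FSIMAGE, step, 100L);
        Counter counter = prog.getCounter(Phase.LOADING_FSIMAGE, step);

        for (int i = 0; i < 100; i++) {
          counter.increment(); // one increment per completed unit of work
        }

        prog.endStep(Phase.LOADING_FSIMAGE, step);
        prog.endPhase(Phase.LOADING_FSIMAGE);
      }
    }

The Counter returned by getCounter can be cached across calls, as the first excerpt does with awaitingReportedBlocksCounter.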