Examples of INodeDirectory

org.apache.hadoop.hdfs.server.namenode.INodeDirectory represents a directory inode in the HDFS NameNode's in-memory namespace tree. The excerpts below, taken mostly from the NameNode's snapshot code, show how directory inodes are resolved, cast, and manipulated.

Examples of org.apache.hadoop.hdfs.server.namenode.INodeDirectory

  /**
   * Find the id of the latest snapshot that covers the given inode (i.e. a
   * snapshot taken on the inode itself or on one of its ancestor
   * directories) and was taken before the given anchor snapshot id.
   */
  public static int findLatestSnapshot(INode inode, final int anchor) {
    int latest = NO_SNAPSHOT_ID;
    for(; inode != null; inode = inode.getParent()) {
      if (inode.isDirectory()) {
        final INodeDirectory dir = inode.asDirectory();
        if (dir.isWithSnapshot()) {
          latest = dir.getDiffs().updatePrior(anchor, latest);
        }
      }
    }
    return latest;
  }
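
The removeSnapshot excerpt at the bottom of this page shows the usual call pattern: the returned id becomes the "prior" snapshot when cleaning up a deleted one. A minimal sketch of that pattern, where dir, snapshot, collectedBlocks and removedINodes are assumed stand-ins rather than names from the excerpt above:

  // Sketch: delete "snapshot" from the subtree rooted at "dir". Changes not
  // captured by the latest prior snapshot can be cleaned up immediately.
  int prior = Snapshot.findLatestSnapshot(dir, snapshot.getId());
  Quota.Counts counts = dir.cleanSubtree(snapshot.getId(), prior,
      collectedBlocks, removedINodes, true);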

Examples of org.apache.hadoop.hdfs.server.namenode.INodeDirectory

      SnapshotSection section = SnapshotSection.parseDelimitedFrom(in);
      int snum = section.getNumSnapshots();
      sm.setNumSnapshots(snum);
      sm.setSnapshotCounter(section.getSnapshotCounter());
      for (long sdirId : section.getSnapshottableDirList()) {
        INodeDirectory dir = fsDir.getInode(sdirId).asDirectory();
        final INodeDirectorySnapshottable sdir;
        if (!dir.isSnapshottable()) {
          sdir = new INodeDirectorySnapshottable(dir);
          fsDir.addToInodeMap(sdir);
        } else {
          // dir is the root directory, which the admin made snapshottable earlier
          sdir = (INodeDirectorySnapshottable) dir;

Examples of org.apache.hadoop.hdfs.server.namenode.INodeDirectory

    private void loadSnapshots(InputStream in, int size) throws IOException {
      for (int i = 0; i < size; i++) {
        SnapshotSection.Snapshot pbs = SnapshotSection.Snapshot
            .parseDelimitedFrom(in);
        INodeDirectory root = loadINodeDirectory(pbs.getRoot(),
            parent.getLoaderContext());
        int sid = pbs.getSnapshotId();
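        // note: this local "parent" shadows the outer parent used by
        // loadINodeDirectory(...) above; here it names the snapshottable
        // directory that owns the snapshot being loaded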
        INodeDirectorySnapshottable parent = (INodeDirectorySnapshottable) fsDir
            .getInode(root.getId()).asDirectory();
        Snapshot snapshot = new Snapshot(sid, root, parent);
        // add the snapshot to its parent; snapshots were saved in
        // snapshotsByNames order, so no re-sorting is needed on load
        parent.addSnapshot(snapshot);
        snapshotMap.put(sid, snapshot);
      }
    }

Examples of org.apache.hadoop.hdfs.server.namenode.INodeDirectory

      }
    } else if (inode.isFile()) {
      inode.cleanSubtree(snapshot, prior, collectedBlocks, removedINodes, true);
    } else if (inode.isDirectory()) {
      Map<INode, INode> excludedNodes = null;
      INodeDirectory dir = inode.asDirectory();
      DirectoryWithSnapshotFeature sf = dir.getDirectoryWithSnapshotFeature();
      if (sf != null) {
        DirectoryDiffList diffList = sf.getDiffs();
        DirectoryDiff priorDiff = diffList.getDiffById(prior);
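        // proceed only if a diff is recorded exactly at "prior";
        // getDiffById can return a different diff when none exists at that id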
        if (priorDiff != null && priorDiff.getSnapshotId() == prior) {
          List<INode> dList = priorDiff.diff.getList(ListType.DELETED);

Examples of org.apache.hadoop.hdfs.server.namenode.INodeDirectory

      } else if (topNode.isFile() && topNode.asFile().isWithSnapshot()) {
        INodeFile file = topNode.asFile();
        counts.add(file.getDiffs().deleteSnapshotDiff(post, prior, file,
            collectedBlocks, removedINodes, countDiffChange));
      } else if (topNode.isDirectory()) {
        INodeDirectory dir = topNode.asDirectory();
        ChildrenDiff priorChildrenDiff = null;
        DirectoryWithSnapshotFeature sf = dir.getDirectoryWithSnapshotFeature();
        if (sf != null) {
          // delete files/dirs created after prior; these files/dirs, along
          // with the inode itself, were deleted right after post
          DirectoryDiff priorDiff = sf.getDiffs().getDiffById(prior);
          if (priorDiff != null && priorDiff.getSnapshotId() == prior) {
            priorChildrenDiff = priorDiff.getChildrenDiff();
            counts.add(priorChildrenDiff.destroyCreatedList(dir,
                collectedBlocks, removedINodes));
          }
        }
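        // iterate over the children as of "prior", skipping those already
        // recorded in prior's DELETED list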
       
        for (INode child : dir.getChildrenList(prior)) {
          if (priorChildrenDiff != null
              && priorChildrenDiff.search(ListType.DELETED,
                  child.getLocalNameBytes()) != null) {
            continue;
          }

Examples of org.apache.hadoop.hdfs.server.namenode.INodeDirectory

  /**
   * Set the given directory as a snapshottable directory.
   * If the path is already a snapshottable directory, update the quota.
   */
  public void setSnapshottable(final String path, boolean checkNestedSnapshottable)
      throws IOException {
    final INodesInPath iip = fsdir.getINodesInPath4Write(path);
    final INodeDirectory d = INodeDirectory.valueOf(iip.getLastINode(), path);
    if (checkNestedSnapshottable) {
      checkNestedSnapshottable(d, path);
    }

    final INodeDirectorySnapshottable s;
    if (d.isSnapshottable()) {
      // the directory is already snapshottable; just reset its snapshot quota
      s = (INodeDirectorySnapshottable)d;
      s.setSnapshotQuota(INodeDirectorySnapshottable.SNAPSHOT_LIMIT);
    } else {
      s = d.replaceSelf4INodeDirectorySnapshottable(iip.getLatestSnapshotId(),
          fsdir.getINodeMap());
    }
    addSnapshottable(s);
  }
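
On the client side this path is normally reached through DistributedFileSystem#allowSnapshot (or the hdfs dfsadmin -allowSnapshot command). A minimal, self-contained sketch, assuming fs.defaultFS points at an HDFS cluster:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;

public class AllowSnapshotSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // cast assumes the default filesystem is HDFS
    DistributedFileSystem dfs = (DistributedFileSystem) FileSystem.get(conf);
    // marks /data snapshottable; on the NameNode this ends up in
    // SnapshotManager.setSnapshottable(...)
    dfs.allowSnapshot(new Path("/data"));
  }
}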

Examples of org.apache.hadoop.hdfs.server.namenode.INodeDirectory

  /**
   * Reset the given snapshottable directory to non-snapshottable.
   *
   * @throws SnapshotException if there are snapshots in the directory.
   */
  public void resetSnapshottable(final String path) throws IOException {
    final INodesInPath iip = fsdir.getINodesInPath4Write(path);
    final INodeDirectory d = INodeDirectory.valueOf(iip.getLastINode(), path);
    if (!d.isSnapshottable()) {
      // the directory is already non-snapshottable
      return;
    }
    final INodeDirectorySnapshottable s = (INodeDirectorySnapshottable) d;
    if (s.getNumSnapshots() > 0) {
      throw new SnapshotException("The directory " + path + " has snapshot(s). "
          + "Please redo the operation after removing all the snapshots.");
    }
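
The client-side counterpart of resetSnapshottable is DistributedFileSystem#disallowSnapshot; continuing the AllowSnapshotSketch above:

    // fails while /data still has snapshots (the SnapshotException checked
    // above); delete them first, e.g. dfs.deleteSnapshot(new Path("/data"), "s0")
    dfs.disallowSnapshot(new Path("/data"));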

Examples of org.apache.hadoop.hdfs.server.namenode.INodeDirectory

  static final int SNAPSHOT_LIMIT = 1 << 16;

  /** Cast INode to INodeDirectorySnapshottable. */
  public static INodeDirectorySnapshottable valueOf(
      INode inode, String src) throws IOException {
    final INodeDirectory dir = INodeDirectory.valueOf(inode, src);
    if (!dir.isSnapshottable()) {
      throw new SnapshotException(
          "Directory is not a snapshottable directory: " + src);
    }
    return (INodeDirectorySnapshottable)dir;
  }
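
A hypothetical caller, in the style of the other excerpts on this page (fsdir and src are assumed names, not part of the snippet above):

    // resolve the path to an inode, then insist on a snapshottable
    // directory; a non-directory or non-snapshottable inode produces a
    // descriptive error from valueOf
    final INode inode = fsdir.getINode(src);
    final INodeDirectorySnapshottable sdir =
        INodeDirectorySnapshottable.valueOf(inode, src);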

Examples of org.apache.hadoop.hdfs.server.namenode.INodeDirectory

        throw new SnapshotException("The snapshot " + newName
            + " already exists for directory " + path);
      }
      // remove the one with old name from snapshotsByNames
      Snapshot snapshot = snapshotsByNames.remove(indexOfOld);
      final INodeDirectory ssRoot = snapshot.getRoot();
      ssRoot.setLocalName(newNameBytes);
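      // indexOfNew is a negative binary-search result (the new name was not
      // found, as checked above); -indexOfNew - 1 recovers the insertion
      // point that keeps snapshotsByNames sorted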
      indexOfNew = -indexOfNew - 1;
      if (indexOfNew <= indexOfOld) {
        snapshotsByNames.add(indexOfNew, snapshot);
      } else { // indexOfNew > indexOfOld
        snapshotsByNames.add(indexOfNew - 1, snapshot);

Examples of org.apache.hadoop.hdfs.server.namenode.INodeDirectory

      final Snapshot snapshot = snapshotsByNames.get(i);
      int prior = Snapshot.findLatestSnapshot(this, snapshot.getId());
      try {
        Quota.Counts counts = cleanSubtree(snapshot.getId(), prior,
            collectedBlocks, removedINodes, true);
        INodeDirectory parent = getParent();
        if (parent != null) {
          // no WithName node corresponds to the deleted snapshot, so only
          // the quota usage in the current tree needs to be updated
          parent.addSpaceConsumed(-counts.get(Quota.NAMESPACE),
              -counts.get(Quota.DISKSPACE), true);
        }
      } catch(QuotaExceededException e) {
        LOG.error("BUG: removeSnapshot increases namespace usage.", e);
      }