Class org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot

Examples of org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot.Root

Snapshot.Root is the INodeDirectory subclass that represents the root directory of a single snapshot. The excerpts below show the two places it appears: serializing snapshots into the FSImage, and listing snapshot roots under a snapshottable directory.


      // Excerpt from the FSImage protobuf saver (serializeSnapshotSection):
      // write the SnapshotSection header, then one length-delimited record per
      // snapshot, embedding the snapshot's Root directory as an INode entry.
      b.build().writeDelimitedTo(out);
      int i = 0;
      for (INodeDirectorySnapshottable sdir : snapshottables) {
        for (Snapshot s : sdir.getSnapshotsByNames()) {
          Root sroot = s.getRoot();
          SnapshotSection.Snapshot.Builder sb = SnapshotSection.Snapshot
              .newBuilder().setSnapshotId(s.getId());
          INodeSection.INodeDirectory.Builder db = buildINodeDirectory(sroot,
              parent.getSaverContext());
          // Serialize the snapshot root as a DIRECTORY inode, preserving its
          // inode id and local name so the loader can reconstruct it.
          INodeSection.INode r = INodeSection.INode.newBuilder()
              .setId(sroot.getId())
              .setType(INodeSection.INode.Type.DIRECTORY)
              .setName(ByteString.copyFrom(sroot.getLocalNameBytes()))
              .setDirectory(db).build();
          sb.setRoot(r).build().writeDelimitedTo(out);
          i++;
          // Periodically give the checkpoint a chance to be cancelled.
          if (i % FSImageFormatProtobuf.Saver.CHECK_CANCEL_INTERVAL == 0) {
            context.checkCancelled();
          }
        }
      }
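Each record above is framed with writeDelimitedTo, so it can be read back with protobuf's matching parseDelimitedFrom. A minimal read-side sketch, assuming the generated FsImageProto classes and a snapshot count taken from the section header (an illustration, not the actual FSImage loader):

    import java.io.IOException;
    import java.io.InputStream;
    import org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotSection;

    class SnapshotSectionDump {
      // numSnapshots is assumed to be read from the SnapshotSection header.
      static void dump(InputStream in, int numSnapshots) throws IOException {
        for (int n = 0; n < numSnapshots; n++) {
          // Mirrors writeDelimitedTo above: one length-prefixed record each.
          SnapshotSection.Snapshot s =
              SnapshotSection.Snapshot.parseDelimitedFrom(in);
          System.out.println("snapshot id=" + s.getSnapshotId()
              + ", root=" + s.getRoot().getName().toStringUtf8());
        }
      }
    }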


    // Excerpt from the NameNode's snapshot listing (getSnapshotsListing):
    // return snapshot roots under a snapshottable directory, resuming after
    // the entry named startAfter and capped at lsLimit entries per batch.
    int skipSize = ReadOnlyList.Util.binarySearch(snapshots, startAfter);
    // binarySearch returns -(insertionPoint) - 1 when startAfter is absent;
    // either way, skipSize becomes the index of the first entry to return.
    skipSize = skipSize < 0 ? -skipSize - 1 : skipSize + 1;
    int numOfListing = Math.min(snapshots.size() - skipSize, this.lsLimit);
    final HdfsFileStatus[] listing = new HdfsFileStatus[numOfListing];
    for (int i = 0; i < numOfListing; i++) {
      Root sRoot = snapshots.get(i + skipSize).getRoot();
      listing[i] = createFileStatus(sRoot.getLocalNameBytes(), sRoot,
          Snapshot.CURRENT_STATE_ID);
    }
    // The second argument reports how many entries remain for the next batch.
    return new DirectoryListing(
        listing, snapshots.size() - skipSize - numOfListing);
  }
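The skip arithmetic relies on the standard binary-search contract: when the key is absent, the search returns -(insertionPoint) - 1, so decoding a negative result yields the insertion point itself. A self-contained illustration using java.util.Arrays.binarySearch, which follows the same convention the code above assumes for ReadOnlyList.Util.binarySearch:

    import java.util.Arrays;

    public class SkipSizeDemo {
      public static void main(String[] args) {
        int[] snapshots = {10, 20, 30, 40};

        // startAfter present at index 1: skip it and everything before it.
        int found = Arrays.binarySearch(snapshots, 20);
        int skip = found < 0 ? -found - 1 : found + 1;
        System.out.println(skip); // 2 -> listing resumes at 30

        // startAfter absent: -(insertionPoint) - 1 decodes back to index 2.
        int missing = Arrays.binarySearch(snapshots, 25);
        skip = missing < 0 ? -missing - 1 : missing + 1;
        System.out.println(skip); // 2 -> listing resumes at 30
      }
    }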

    // The same listing from an older Hadoop release: before the snapshot-id
    // refactoring, createFileStatus took a Snapshot reference (null meaning
    // the current, non-snapshot state) instead of Snapshot.CURRENT_STATE_ID.
    int skipSize = ReadOnlyList.Util.binarySearch(snapshots, startAfter);
    skipSize = skipSize < 0 ? -skipSize - 1 : skipSize + 1;
    int numOfListing = Math.min(snapshots.size() - skipSize, this.lsLimit);
    final HdfsFileStatus[] listing = new HdfsFileStatus[numOfListing];
    for (int i = 0; i < numOfListing; i++) {
      Root sRoot = snapshots.get(i + skipSize).getRoot();
      listing[i] = createFileStatus(sRoot.getLocalNameBytes(), sRoot, null);
    }
    return new DirectoryListing(
        listing, snapshots.size() - skipSize - numOfListing);
  }
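This server-side listing is what backs the special ".snapshot" path: listing "<snapshottable dir>/.snapshot" from a client returns one entry per snapshot Root. A hedged client-side sketch ("/data" is a placeholder path; assumes a reachable HDFS where /data is snapshottable):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileStatus;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class ListSnapshots {
      public static void main(String[] args) throws Exception {
        FileSystem fs = FileSystem.get(new Configuration());
        // Each returned status is a snapshot root produced by the
        // NameNode-side getSnapshotsListing() shown above.
        for (FileStatus st : fs.listStatus(new Path("/data/.snapshot"))) {
          System.out.println(st.getPath().getName());
        }
      }
    }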