Examples of DirectoryListing


Examples of org.apache.hadoop.hdfs.protocol.DirectoryListing

      LOG.debug("NFS READDIR fileId: " + handle.getFileId() + " cookie: "
          + cookie + " count: " + count);
    }

    HdfsFileStatus dirStatus = null;
    DirectoryListing dlisting = null;
    Nfs3FileAttributes postOpAttr = null;
    long dotdotFileId = 0;
    try {
      String dirFileIdPath = Nfs3Utils.getFileIdPath(handle);
      dirStatus = dfsClient.getFileInfo(dirFileIdPath);
      if (dirStatus == null) {
        LOG.info("Can't get path for fileId:" + handle.getFileId());
        return new READDIR3Response(Nfs3Status.NFS3ERR_STALE);
      }
      if (!dirStatus.isDir()) {
        LOG.error("Can't readdir for regular file, fileId:"
            + handle.getFileId());
        return new READDIR3Response(Nfs3Status.NFS3ERR_NOTDIR);
      }
      long cookieVerf = request.getCookieVerf();
      if ((cookieVerf != 0) && (cookieVerf != dirStatus.getModificationTime())) {
        if (aixCompatMode) {
          // The AIX NFS client misinterprets RFC-1813 and will repeatedly send
          // the same cookieverf value even across VFS-level readdir calls,
          // instead of getting a new cookieverf for every VFS-level readdir
          // call, and reusing the cookieverf only in the event that multiple
          // incremental NFS-level readdir calls must be made to fetch all of
          // the directory entries. This means that whenever a readdir call is
          // made by an AIX NFS client for a given directory, and that directory
          // is subsequently modified, thus changing its mtime, no later readdir
          // calls will succeed from AIX for that directory until the FS is
          // unmounted/remounted. See HDFS-6549 for more info.
          LOG.warn("AIX compatibility mode enabled, ignoring cookieverf " +
              "mismatches.");
        } else {
          LOG.error("CookieVerf mismatch. request cookieVerf: " + cookieVerf
              + " dir cookieVerf: " + dirStatus.getModificationTime());
          return new READDIR3Response(Nfs3Status.NFS3ERR_BAD_COOKIE);
        }
      }

      if (cookie == 0) {
        // Get dotdot fileId
        String dotdotFileIdPath = dirFileIdPath + "/..";
        HdfsFileStatus dotdotStatus = dfsClient.getFileInfo(dotdotFileIdPath);

        if (dotdotStatus == null) {
          // This should not happen
          throw new IOException("Can't get path for handle path:"
              + dotdotFileIdPath);
        }
        dotdotFileId = dotdotStatus.getFileId();
      }

      // Get the list from the resume point
      byte[] startAfter;
      if (cookie == 0) {
        startAfter = HdfsFileStatus.EMPTY_NAME;
      } else {
        String inodeIdPath = Nfs3Utils.getFileIdPath(cookie);
        startAfter = inodeIdPath.getBytes();
      }
     
      dlisting = listPaths(dfsClient, dirFileIdPath, startAfter);
      postOpAttr = Nfs3Utils.getFileAttr(dfsClient, dirFileIdPath, iug);
      if (postOpAttr == null) {
        LOG.error("Can't get path for fileId:" + handle.getFileId());
        return new READDIR3Response(Nfs3Status.NFS3ERR_STALE);
      }
    } catch (IOException e) {
      LOG.warn("Exception ", e);
      return new READDIR3Response(Nfs3Status.NFS3ERR_IO);
    }

    /**
     * Set up the dirents in the response. fileId is used as the cookie with one
     * exception. The Linux client can either get stuck in the "ls" command (on
     * RHEL) or report "Too many levels of symbolic links" (Ubuntu).
     *
     * The problem is that only two items, "." and "..", are returned when the
     * namespace is empty. Both of them are "/" with the same cookie (the root
     * fileId). The Linux client doesn't consider such a directory a real
     * directory. Even though the NFS protocol specifies that the cookie is
     * opaque data, the Linux client somehow doesn't like an empty dir returning
     * the same cookie for both "." and "..".
     *
     * The workaround is to use 0 as the cookie for "." and always return "." as
     * the first entry in the readdir/readdirplus response.
     */
    HdfsFileStatus[] fstatus = dlisting.getPartialListing();
    int n = (int) Math.min(fstatus.length, count - 2);
    boolean eof = n >= fstatus.length && dlisting.getRemainingEntries() == 0;
   
    Entry3[] entries;
    if (cookie == 0) {
      entries = new Entry3[n + 2];
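
The excerpt is truncated just as the dirent array is populated. As a hedged sketch of the elided tail, assuming READDIR3Response.Entry3 is constructed from a fileId, a name, and a cookie (constructor shape inferred, not confirmed by this excerpt), the workaround described in the comment above translates into roughly:

      // Workaround: "." always uses cookie 0 so an empty directory never
      // hands the client the same cookie for both "." and "..".
      entries[0] = new Entry3(handle.getFileId(), ".", 0);
      entries[1] = new Entry3(dotdotFileId, "..", dotdotFileId);
      for (int i = 2; i < n + 2; i++) {
        // Each real entry uses its own fileId as both its id and its
        // resume cookie.
        entries[i] = new Entry3(fstatus[i - 2].getFileId(),
            fstatus[i - 2].getLocalName(), fstatus[i - 2].getFileId());
      }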

Examples of org.apache.hadoop.hdfs.protocol.DirectoryListing

      LOG.debug("NFS READDIRPLUS fileId: " + handle.getFileId() + " cookie: "
          + cookie + " dirCount: " + dirCount + " maxCount: " + maxCount);
    }

    HdfsFileStatus dirStatus;
    DirectoryListing dlisting = null;
    Nfs3FileAttributes postOpDirAttr = null;
    long dotdotFileId = 0;
    try {
      String dirFileIdPath = Nfs3Utils.getFileIdPath(handle);
      dirStatus = dfsClient.getFileInfo(dirFileIdPath);
      if (dirStatus == null) {
        LOG.info("Can't get path for fileId:" + handle.getFileId());
        return new READDIRPLUS3Response(Nfs3Status.NFS3ERR_STALE);
      }
      if (!dirStatus.isDir()) {
        LOG.error("Can't readdirplus for regular file, fileId:"
            + handle.getFileId());
        return new READDIRPLUS3Response(Nfs3Status.NFS3ERR_NOTDIR);
      }
      long cookieVerf = request.getCookieVerf();
      if ((cookieVerf != 0) && (cookieVerf != dirStatus.getModificationTime())) {
        if (aixCompatMode) {
          // The AIX NFS client misinterprets RFC-1813 and will repeatedly send
          // the same cookieverf value even across VFS-level readdir calls,
          // instead of getting a new cookieverf for every VFS-level readdir
          // call. This means that whenever a readdir call is made by an AIX NFS
          // client for a given directory, and that directory is subsequently
          // modified, thus changing its mtime, no later readdir calls will
          // succeed for that directory from AIX until the FS is
          // unmounted/remounted. See HDFS-6549 for more info.
          LOG.warn("AIX compatibility mode enabled, ignoring cookieverf " +
              "mismatches.");
        } else {
          LOG.error("cookieverf mismatch. request cookieverf: " + cookieVerf
              + " dir cookieverf: " + dirStatus.getModificationTime());
          return new READDIRPLUS3Response(Nfs3Status.NFS3ERR_BAD_COOKIE);
        }
      }

      if (cookie == 0) {
        // Get dotdot fileId
        String dotdotFileIdPath = dirFileIdPath + "/..";
        HdfsFileStatus dotdotStatus = dfsClient.getFileInfo(dotdotFileIdPath);

        if (dotdotStatus == null) {
          // This should not happen
          throw new IOException("Can't get path for handle path:"
              + dotdotFileIdPath);
        }
        dotdotFileId = dotdotStatus.getFileId();
      }

      // Get the list from the resume point
      byte[] startAfter;
      if (cookie == 0) {
        startAfter = HdfsFileStatus.EMPTY_NAME;
      } else {
        String inodeIdPath = Nfs3Utils.getFileIdPath(cookie);
        startAfter = inodeIdPath.getBytes();
      }
     
      dlisting = listPaths(dfsClient, dirFileIdPath, startAfter);
      postOpDirAttr = Nfs3Utils.getFileAttr(dfsClient, dirFileIdPath, iug);
      if (postOpDirAttr == null) {
        LOG.info("Can't get path for fileId:" + handle.getFileId());
        return new READDIRPLUS3Response(Nfs3Status.NFS3ERR_STALE);
      }
    } catch (IOException e) {
      LOG.warn("Exception ", e);
      return new READDIRPLUS3Response(Nfs3Status.NFS3ERR_IO);
    }
   
    // Set up the dirents in the response
    HdfsFileStatus[] fstatus = dlisting.getPartialListing();
    int n = (int) Math.min(fstatus.length, dirCount - 2);
    boolean eof = n >= fstatus.length && dlisting.getRemainingEntries() == 0;
   
    READDIRPLUS3Response.EntryPlus3[] entries;
    if (cookie == 0) {
      entries = new READDIRPLUS3Response.EntryPlus3[n+2];
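
As in the READDIR sketch above, the truncation hides the array population. A hedged sketch, assuming READDIRPLUS3Response.EntryPlus3 additionally carries the entry's attributes and a file handle, and that FileHandle can be built from a fileId (both assumptions, not confirmed by this excerpt):

      entries[0] = new READDIRPLUS3Response.EntryPlus3(
          handle.getFileId(), ".", 0, postOpDirAttr,
          new FileHandle(handle.getFileId()));
      entries[1] = new READDIRPLUS3Response.EntryPlus3(
          dotdotFileId, "..", dotdotFileId, postOpDirAttr,
          new FileHandle(dotdotFileId));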

Examples of org.apache.hadoop.hdfs.protocol.DirectoryListing

    String path = file.getFullName(parent);
    boolean isOpen = false;

    if (file.isDir()) {
      byte[] lastReturnedName = HdfsFileStatus.EMPTY_NAME;
      DirectoryListing thisListing;
      if (showFiles) {
        out.println(path + " <dir>");
      }
      res.totalDirs++;
      do {
        assert lastReturnedName != null;
        thisListing = namenode.getRpcServer().getListing(
            path, lastReturnedName, false);
        if (thisListing == null) {
          return;
        }
        HdfsFileStatus[] files = thisListing.getPartialListing();
        for (int i = 0; i < files.length; i++) {
          check(path, files[i], res);
        }
        lastReturnedName = thisListing.getLastName();
      } while (thisListing.hasMore());
      return;
    }
    if (file.isSymlink()) {
      if (showFiles) {
        out.println(path + " <symlink>");
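
The do/while above is the canonical DirectoryListing pagination idiom: fetch a batch, remember getLastName(), and loop while hasMore(). The same idiom as a self-contained sketch against DFSClient.listPaths (the helper name is ours; listPaths is the client-side batched listing call used in other examples on this page):

  static long countEntries(DFSClient dfs, String dir) throws IOException {
    long total = 0;
    byte[] lastReturned = HdfsFileStatus.EMPTY_NAME;
    DirectoryListing batch;
    do {
      batch = dfs.listPaths(dir, lastReturned);
      if (batch == null) {
        break; // directory was deleted while iterating
      }
      total += batch.getPartialListing().length;
      lastReturned = batch.getLastName();
    } while (batch.hasMore());
    return total;
  }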

Examples of org.apache.hadoop.hdfs.protocol.DirectoryListing

   * @throws IOException if any other I/O error occurred
   */
  DirectoryListing getListing(String src, byte[] startAfter,
      boolean needLocation)
    throws AccessControlException, UnresolvedLinkException, IOException {
    DirectoryListing dl;
    FSPermissionChecker pc = getPermissionChecker();
    readLock();
    try {
      if (isPermissionEnabled) {
        if (dir.isDir(src)) {
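
The excerpt stops inside the permission check, but the calling convention follows from the signature: pass HdfsFileStatus.EMPTY_NAME to start at the beginning of the directory, then feed getLastName() back in as startAfter for subsequent batches. A hypothetical caller sketch (the path is illustrative):

    DirectoryListing first =
        getListing("/user/alice", HdfsFileStatus.EMPTY_NAME, true);
    if (first != null && first.hasMore()) {
      DirectoryListing next =
          getListing("/user/alice", first.getLastName(), true);
      // ...continue feeding getLastName() back until hasMore() is false
    }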

Examples of org.apache.hadoop.hdfs.protocol.DirectoryListing

      LOG.debug("NFS READDIR fileId: " + handle.getFileId() + " cookie: "
          + cookie + " count: " + count);
    }

    HdfsFileStatus dirStatus;
    DirectoryListing dlisting = null;
    Nfs3FileAttributes postOpAttr = null;
    long dotdotFileId = 0;
    try {
      String dirFileIdPath = Nfs3Utils.getFileIdPath(handle);
      dirStatus = dfsClient.getFileInfo(dirFileIdPath);
      if (dirStatus == null) {
        LOG.info("Can't get path for fileId:" + handle.getFileId());
        return new READDIR3Response(Nfs3Status.NFS3ERR_STALE);
      }
      if (!dirStatus.isDir()) {
        LOG.error("Can't readdir for regular file, fileId:"
            + handle.getFileId());
        return new READDIR3Response(Nfs3Status.NFS3ERR_NOTDIR);
      }
      long cookieVerf = request.getCookieVerf();
      if ((cookieVerf != 0) && (cookieVerf != dirStatus.getModificationTime())) {
        LOG.error("CookierVerf mismatch. request cookierVerf:" + cookieVerf
            + " dir cookieVerf:" + dirStatus.getModificationTime());
        return new READDIR3Response(Nfs3Status.NFS3ERR_BAD_COOKIE);
      }

      if (cookie == 0) {
        // Get dotdot fileId
        String dotdotFileIdPath = dirFileIdPath + "/..";
        HdfsFileStatus dotdotStatus = dfsClient.getFileInfo(dotdotFileIdPath);

        if (dotdotStatus == null) {
          // This should not happen
          throw new IOException("Can't get path for handle path:"
              + dotdotFileIdPath);
        }
        dotdotFileId = dotdotStatus.getFileId();
      }

      // Get the list from the resume point
      byte[] startAfter;
      if (cookie == 0) {
        startAfter = HdfsFileStatus.EMPTY_NAME;
      } else {
        String inodeIdPath = Nfs3Utils.getFileIdPath(cookie);
        startAfter = inodeIdPath.getBytes();
      }
      dlisting = dfsClient.listPaths(dirFileIdPath, startAfter);

      postOpAttr = Nfs3Utils.getFileAttr(dfsClient, dirFileIdPath, iug);
      if (postOpAttr == null) {
        LOG.error("Can't get path for fileId:" + handle.getFileId());
        return new READDIR3Response(Nfs3Status.NFS3ERR_STALE);
      }
    } catch (IOException e) {
      LOG.warn("Exception ", e);
      return new READDIR3Response(Nfs3Status.NFS3ERR_IO);
    }

    /**
     * Set up the dirents in the response. fileId is used as the cookie with one
     * exception. The Linux client can either get stuck in the "ls" command (on
     * RHEL) or report "Too many levels of symbolic links" (Ubuntu).
     *
     * The problem is that only two items, "." and "..", are returned when the
     * namespace is empty. Both of them are "/" with the same cookie (the root
     * fileId). The Linux client doesn't consider such a directory a real
     * directory. Even though the NFS protocol specifies that the cookie is
     * opaque data, the Linux client somehow doesn't like an empty dir returning
     * the same cookie for both "." and "..".
     *
     * The workaround is to use 0 as the cookie for "." and always return "." as
     * the first entry in the readdir/readdirplus response.
     */
    HdfsFileStatus[] fstatus = dlisting.getPartialListing();
    int n = (int) Math.min(fstatus.length, count - 2);
    boolean eof = n >= fstatus.length && dlisting.getRemainingEntries() == 0;
   
    Entry3[] entries;
    if (cookie == 0) {
      entries = new Entry3[n + 2];
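
Worth noting above: the opaque NFS cookie is really an HDFS inode id, which Nfs3Utils.getFileIdPath converts into a path the namenode can resolve. A sketch of that encoding, under the assumption that it uses HDFS's reserved inode namespace (/.reserved/.inodes/&lt;fileId&gt;):

    long cookie = 16387L;  // hypothetical inode id previously handed to the client
    String inodeIdPath = "/.reserved/.inodes/" + cookie;  // assumed path layout
    byte[] startAfter = inodeIdPath.getBytes();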

Examples of org.apache.hadoop.hdfs.protocol.DirectoryListing

      LOG.debug("NFS READDIRPLUS fileId: " + handle.getFileId() + " cookie: "
          + cookie + " dirCount: " + dirCount + " maxCount: " + maxCount);
    }

    HdfsFileStatus dirStatus;
    DirectoryListing dlisting = null;
    Nfs3FileAttributes postOpDirAttr = null;
    long dotdotFileId = 0;
    try {
      String dirFileIdPath = Nfs3Utils.getFileIdPath(handle);
      dirStatus = dfsClient.getFileInfo(dirFileIdPath);
      if (dirStatus == null) {
        LOG.info("Can't get path for fileId:" + handle.getFileId());
        return new READDIRPLUS3Response(Nfs3Status.NFS3ERR_STALE);
      }
      if (!dirStatus.isDir()) {
        LOG.error("Can't readdirplus for regular file, fileId:"
            + handle.getFileId());
        return new READDIRPLUS3Response(Nfs3Status.NFS3ERR_NOTDIR);
      }
      long cookieVerf = request.getCookieVerf();
      if ((cookieVerf != 0) && (cookieVerf != dirStatus.getModificationTime())) {
        LOG.error("CookierVerf mismatch. request cookierVerf:" + cookieVerf
            + " dir cookieVerf:" + dirStatus.getModificationTime());
        return new READDIRPLUS3Response(Nfs3Status.NFS3ERR_BAD_COOKIE);
      }

      if (cookie == 0) {
        // Get dotdot fileId
        String dotdotFileIdPath = dirFileIdPath + "/..";
        HdfsFileStatus dotdotStatus = dfsClient.getFileInfo(dotdotFileIdPath);

        if (dotdotStatus == null) {
          // This should not happen
          throw new IOException("Can't get path for handle path:"
              + dotdotFileIdPath);
        }
        dotdotFileId = dotdotStatus.getFileId();
      }

      // Get the list from the resume point
      byte[] startAfter;
      if (cookie == 0) {
        startAfter = HdfsFileStatus.EMPTY_NAME;
      } else {
        String inodeIdPath = Nfs3Utils.getFileIdPath(cookie);
        startAfter = inodeIdPath.getBytes();
      }
      dlisting = dfsClient.listPaths(dirFileIdPath, startAfter);

      postOpDirAttr = Nfs3Utils.getFileAttr(dfsClient, dirFileIdPath, iug);
      if (postOpDirAttr == null) {
        LOG.info("Can't get path for fileId:" + handle.getFileId());
        return new READDIRPLUS3Response(Nfs3Status.NFS3ERR_STALE);
      }
    } catch (IOException e) {
      LOG.warn("Exception ", e);
      return new READDIRPLUS3Response(Nfs3Status.NFS3ERR_IO);
    }
   
    // Set up the dirents in the response
    HdfsFileStatus[] fstatus = dlisting.getPartialListing();
    int n = (int) Math.min(fstatus.length, dirCount - 2);
    boolean eof = n >= fstatus.length && dlisting.getRemainingEntries() == 0;
   
    READDIRPLUS3Response.EntryPlus3[] entries;
    if (cookie == 0) {
      entries = new READDIRPLUS3Response.EntryPlus3[n+2];
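
A small point that is easy to miss above: dirCount - 2 reserves room for the synthetic "." and ".." entries that are prepended when cookie == 0, so only that many slots remain for real dirents:

    int syntheticEntries = 2;  // "." and ".."
    int maxRealEntries = dirCount - syntheticEntries;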

Examples of org.apache.hadoop.hdfs.protocol.DirectoryListing

  @Override
  public FileStatus[] listStatus(Path p) throws IOException {
    String src = getPathName(p);
   
    // fetch the first batch of entries in the directory
    DirectoryListing thisListing = dfs.listPaths(
        src, HdfsFileStatus.EMPTY_NAME);
   
    if (thisListing == null) { // the directory does not exist
      return null;
    }
   
    HdfsFileStatus[] partialListing = thisListing.getPartialListing();
    if (!thisListing.hasMore()) { // got all entries of the directory
      FileStatus[] stats = new FileStatus[partialListing.length];
      for (int i = 0; i < partialListing.length; i++) {
        stats[i] = makeQualified(partialListing[i], p);
      }
      statistics.incrementReadOps(1);
      return stats;
    }
   
    // The directory is too large to list in one batch and needs more fetches;
    // estimate the total number of entries in the directory.
    int totalNumEntries =
      partialListing.length + thisListing.getRemainingEntries();
    ArrayList<FileStatus> listing =
      new ArrayList<FileStatus>(totalNumEntries);
    // add the first batch of entries to the array list
    for (HdfsFileStatus fileStatus : partialListing) {
      listing.add(makeQualified(fileStatus, p));
    }
    statistics.incrementLargeReadOps(1);

    // now fetch more entries
    do {
      thisListing = dfs.listPaths(src, thisListing.getLastName());
     
      if (thisListing == null) {
        return null; // the directory is deleted
      }
     
      partialListing = thisListing.getPartialListing();
      for (HdfsFileStatus fileStatus : partialListing) {
        listing.add(makeQualified(fileStatus, p));
      }
      statistics.incrementLargeReadOps(1);
    } while (thisListing.hasMore());

    return listing.toArray(new FileStatus[listing.size()]);
  }
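
Callers never see this batching: listStatus returns either the complete array or null when the directory does not exist. A usage sketch (the path is illustrative):

  FileSystem fs = FileSystem.get(new Configuration());
  FileStatus[] statuses = fs.listStatus(new Path("/tmp"));
  if (statuses == null) {
    System.err.println("directory does not exist");
  } else {
    for (FileStatus st : statuses) {
      System.out.println(st.getPath() + (st.isDir() ? " <dir>" : ""));
    }
  }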

Examples of org.apache.hadoop.hdfs.protocol.DirectoryListing

  }

  @Override
  public DirectoryListing getListing(String src, byte[] startAfter)
  throws IOException {
    DirectoryListing files = namesystem.getListing(src, startAfter);
    myMetrics.incrNumGetListingOps();
    if (files != null) {
      myMetrics.incrNumFilesInGetListingOps(files.getPartialListing().length);
    }
    return files;
  }
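
The null check above matters to callers as well: a null DirectoryListing signals a nonexistent path, not an empty directory. A defensive caller sketch (the path is illustrative):

    DirectoryListing dl = getListing("/no/such/dir", HdfsFileStatus.EMPTY_NAME);
    int batchSize = (dl == null) ? 0 : dl.getPartialListing().length;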

Examples of org.apache.hadoop.hdfs.protocol.DirectoryListing

      LOG.debug("NFS READDIR fileId: " + handle.getFileId() + " cookie: "
          + cookie + " count: " + count);
    }

    HdfsFileStatus dirStatus;
    DirectoryListing dlisting = null;
    Nfs3FileAttributes postOpAttr = null;
    long dotdotFileId = 0;
    try {
      String dirFileIdPath = Nfs3Utils.getFileIdPath(handle);
      dirStatus = dfsClient.getFileInfo(dirFileIdPath);
      if (dirStatus == null) {
        LOG.info("Can't get path for fileId:" + handle.getFileId());
        return new READDIR3Response(Nfs3Status.NFS3ERR_STALE);
      }
      if (!dirStatus.isDir()) {
        LOG.error("Can't readdir for regular file, fileId:"
            + handle.getFileId());
        return new READDIR3Response(Nfs3Status.NFS3ERR_NOTDIR);
      }
      long cookieVerf = request.getCookieVerf();
      if ((cookieVerf != 0) && (cookieVerf != dirStatus.getModificationTime())) {
        LOG.error("CookierVerf mismatch. request cookierVerf:" + cookieVerf
            + " dir cookieVerf:" + dirStatus.getModificationTime());
        return new READDIR3Response(Nfs3Status.NFS3ERR_BAD_COOKIE);
      }

      if (cookie == 0) {
        // Get dotdot fileId
        String dotdotFileIdPath = dirFileIdPath + "/..";
        HdfsFileStatus dotdotStatus = dfsClient.getFileInfo(dotdotFileIdPath);

        if (dotdotStatus == null) {
          // This should not happen
          throw new IOException("Can't get path for handle path:"
              + dotdotFileIdPath);
        }
        dotdotFileId = dotdotStatus.getFileId();
      }

      // Get the list from the resume point
      byte[] startAfter;
      if (cookie == 0) {
        startAfter = HdfsFileStatus.EMPTY_NAME;
      } else {
        String inodeIdPath = Nfs3Utils.getFileIdPath(cookie);
        startAfter = inodeIdPath.getBytes();
      }
      dlisting = dfsClient.listPaths(dirFileIdPath, startAfter);

      postOpAttr = Nfs3Utils.getFileAttr(dfsClient, dirFileIdPath, iug);
      if (postOpAttr == null) {
        LOG.error("Can't get path for fileId:" + handle.getFileId());
        return new READDIR3Response(Nfs3Status.NFS3ERR_STALE);
      }
    } catch (IOException e) {
      LOG.warn("Exception ", e);
      return new READDIR3Response(Nfs3Status.NFS3ERR_IO);
    }

    /**
     * Set up the dirents in the response. fileId is used as the cookie with one
     * exception. The Linux client can either get stuck in the "ls" command (on
     * RHEL) or report "Too many levels of symbolic links" (Ubuntu).
     *
     * The problem is that only two items, "." and "..", are returned when the
     * namespace is empty. Both of them are "/" with the same cookie (the root
     * fileId). The Linux client doesn't consider such a directory a real
     * directory. Even though the NFS protocol specifies that the cookie is
     * opaque data, the Linux client somehow doesn't like an empty dir returning
     * the same cookie for both "." and "..".
     *
     * The workaround is to use 0 as the cookie for "." and always return "." as
     * the first entry in the readdir/readdirplus response.
     */
    HdfsFileStatus[] fstatus = dlisting.getPartialListing();
    int n = (int) Math.min(fstatus.length, count - 2);
    boolean eof = n >= fstatus.length && dlisting.getRemainingEntries() == 0;
   
    Entry3[] entries;
    if (cookie == 0) {
      entries = new Entry3[n + 2];

Examples of org.apache.hadoop.hdfs.protocol.DirectoryListing

      LOG.debug("NFS READDIRPLUS fileId: " + handle.getFileId() + " cookie: "
          + cookie + " dirCount: " + dirCount + " maxCount: " + maxCount);
    }

    HdfsFileStatus dirStatus;
    DirectoryListing dlisting = null;
    Nfs3FileAttributes postOpDirAttr = null;
    long dotdotFileId = 0;
    try {
      String dirFileIdPath = Nfs3Utils.getFileIdPath(handle);
      dirStatus = dfsClient.getFileInfo(dirFileIdPath);
      if (dirStatus == null) {
        LOG.info("Can't get path for fileId:" + handle.getFileId());
        return new READDIRPLUS3Response(Nfs3Status.NFS3ERR_STALE);
      }
      if (!dirStatus.isDir()) {
        LOG.error("Can't readdirplus for regular file, fileId:"
            + handle.getFileId());
        return new READDIRPLUS3Response(Nfs3Status.NFS3ERR_NOTDIR);
      }
      long cookieVerf = request.getCookieVerf();
      if ((cookieVerf != 0) && (cookieVerf != dirStatus.getModificationTime())) {
        LOG.error("CookierVerf mismatch. request cookierVerf:" + cookieVerf
            + " dir cookieVerf:" + dirStatus.getModificationTime());
        return new READDIRPLUS3Response(Nfs3Status.NFS3ERR_BAD_COOKIE);
      }

      if (cookie == 0) {
        // Get dotdot fileId
        String dotdotFileIdPath = dirFileIdPath + "/..";
        HdfsFileStatus dotdotStatus = dfsClient.getFileInfo(dotdotFileIdPath);

        if (dotdotStatus == null) {
          // This should not happen
          throw new IOException("Can't get path for handle path:"
              + dotdotFileIdPath);
        }
        dotdotFileId = dotdotStatus.getFileId();
      }

      // Get the list from the resume point
      byte[] startAfter;
      if (cookie == 0) {
        startAfter = HdfsFileStatus.EMPTY_NAME;
      } else {
        String inodeIdPath = Nfs3Utils.getFileIdPath(cookie);
        startAfter = inodeIdPath.getBytes();
      }
      dlisting = dfsClient.listPaths(dirFileIdPath, startAfter);

      postOpDirAttr = Nfs3Utils.getFileAttr(dfsClient, dirFileIdPath, iug);
      if (postOpDirAttr == null) {
        LOG.info("Can't get path for fileId:" + handle.getFileId());
        return new READDIRPLUS3Response(Nfs3Status.NFS3ERR_STALE);
      }
    } catch (IOException e) {
      LOG.warn("Exception ", e);
      return new READDIRPLUS3Response(Nfs3Status.NFS3ERR_IO);
    }
   
    // Set up the dirents in the response
    HdfsFileStatus[] fstatus = dlisting.getPartialListing();
    int n = (int) Math.min(fstatus.length, dirCount - 2);
    boolean eof = n >= fstatus.length && dlisting.getRemainingEntries() == 0;
   
    READDIRPLUS3Response.EntryPlus3[] entries;
    if (cookie == 0) {
      entries = new READDIRPLUS3Response.EntryPlus3[n+2];
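
Finally, the cookieverf check that recurs in all of these handlers is simple once you see where the verifier comes from: the server uses the directory's modification time as the verifier, so a mismatch means the directory changed under the client and its saved cookies may be stale. In sketch form:

    // Server side: hand out the directory mtime as the cookie verifier.
    long cookieVerf = dirStatus.getModificationTime();
    // On a later request, a nonzero verifier that no longer matches the
    // current mtime yields NFS3ERR_BAD_COOKIE (unless AIX compatibility
    // mode is enabled, as in the first two examples above).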