Package org.apache.hadoop.fs

Examples of org.apache.hadoop.fs.Path.makeQualified()
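Path.makeQualified(FileSystem fs) returns a new, fully qualified Path: a relative path is resolved against the filesystem's working directory, and a missing scheme or authority is filled in from the filesystem's URI. The original Path object is never modified. In newer Hadoop releases this overload is deprecated in favor of Path.makeQualified(URI, Path) or FileSystem.makeQualified(Path). A minimal sketch of the behavior, with an illustrative class name and paths (the printed values depend on the default filesystem in the Configuration):

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class MakeQualifiedExample {
  public static void main(String[] args) throws IOException {
    Configuration conf = new Configuration();
    Path relative = new Path("raidtest");          // no scheme, no authority, not absolute
    FileSystem fs = relative.getFileSystem(conf);

    // Returns a new Path such as hdfs://namenode:8020/user/<user>/raidtest
    // (or file:/... when the default filesystem is the local one); 'relative' is unchanged.
    Path qualified = relative.makeQualified(fs);
    System.out.println(qualified);

    // Equivalent, non-deprecated form:
    System.out.println(fs.makeQualified(relative));
  }
}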


      for (PolicyList policyList : cnode.getAllPolicies()) {
        for (PolicyInfo p : policyList.getAll()) {
          if (p.getName().equals("policy1")) {
            Path srcPath = new Path("/user/dhruba/raidtest");
            assertTrue(p.getSrcPath().equals(
                srcPath.makeQualified(srcPath.getFileSystem(conf))));
          } else {
            assertTrue(p.getName().equals("policy2"));
            Path srcPath = new Path("/user/dhruba/raidtest2");
            assertTrue(p.getSrcPath().equals(
                srcPath.makeQualified(srcPath.getFileSystem(conf))));
          }
          assertEquals(targetReplication,
                       Integer.parseInt(p.getProperty("targetReplication")));
          assertEquals(metaReplication,
                       Integer.parseInt(p.getProperty("metaReplication")));
View Full Code Here
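In this Hadoop RAID test, the locally constructed path is qualified before the equals() check because Path.equals() compares the full underlying URI; the policy's srcPath is presumably stored fully qualified, so a bare new Path("/user/dhruba/raidtest") would not match it. A small runnable sketch of the same comparison (output depends on the configured default filesystem):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class QualifiedEqualsSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    Path raw = new Path("/user/dhruba/raidtest");
    FileSystem fs = raw.getFileSystem(conf);
    Path qualified = raw.makeQualified(fs);

    // false: the qualified form gains a scheme (and possibly an authority),
    // e.g. file:/user/dhruba/raidtest or hdfs://namenode:8020/user/dhruba/raidtest.
    System.out.println(raw.equals(qualified));
    // true: qualifying the same path against the same filesystem is deterministic.
    System.out.println(qualified.equals(fs.makeQualified(raw)));
  }
}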

    LOG.info("" + fs);
    try {
      if (rootRegionDir == null) {
        rootRegionDir = TEST_UTIL.getDataTestDir("HLogPerformanceEvaluation");
      }
      rootRegionDir = rootRegionDir.makeQualified(fs);
      cleanRegionRootDir(fs, rootRegionDir);
      // Initialize Table Descriptor
      HTableDescriptor htd = createHTableDescriptor(numFamilies);
      final long whenToRoll = roll;
      HLog hlog = new HLog(fs, new Path(rootRegionDir, "wals"),
View Full Code Here
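From HBase's HLogPerformanceEvaluation benchmark: the test data directory is qualified against the target filesystem before it is cleaned and handed to the HLog constructor, so the WAL directory carries an explicit scheme and authority. Note that the result is assigned back to rootRegionDir; makeQualified() does not modify the Path in place.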

      int blockReplication = 1;
      long blockSize = DEFAULT_BLOCK_SIZE; // Block Size not known.
      long modTime = -1; // Modification time of root dir not known.
      Path root = new Path("/");
      return new FileStatus(length, isDir, blockReplication, blockSize,
          modTime, root.makeQualified(this));
    }
    String pathName = parentPath.toUri().getPath();
    FTPFile[] ftpFiles = client.listFiles(pathName);
    if (ftpFiles != null) {
      for (FTPFile ftpFile : ftpFiles) {
View Full Code Here
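From Hadoop's FTPFileSystem: when the requested path is the root directory, a FileStatus is synthesized with placeholder values (block size and modification time unknown) and its path is qualified against this, i.e. the FTP filesystem itself, so callers see an ftp://host/-style path rather than a bare /. For any other path, the parent directory is listed over the FTP connection, as the following lines show.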

    FsPermission permission = getPermissions(ftpFile);
    String user = ftpFile.getUser();
    String group = ftpFile.getGroup();
    Path filePath = new Path(parentPath, ftpFile.getName());
    return new FileStatus(length, isDir, blockReplication, blockSize, modTime,
        accessTime, permission, user, group, filePath.makeQualified(this));
  }

  @Override
  public boolean mkdirs(Path file, FsPermission permission) throws IOException {
    FTPClient client = connect();
View Full Code Here
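The per-file case from the same FTPFileSystem: each FTPFile returned by the server becomes a FileStatus whose path (parent plus file name) is again qualified with this, stamping the ftp scheme and the server's authority onto it. Because the returned paths are fully qualified, callers can hand them straight back to Path.getFileSystem() and reach the same FTP server.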

   * @throws IOException e
   */
  public static Path getRootDir(final Configuration c) throws IOException {
    Path p = new Path(c.get(HConstants.HBASE_DIR));
    FileSystem fs = p.getFileSystem(c);
    return p.makeQualified(fs);
  }

  /**
   * Checks if root region exists
   *
 
View Full Code Here
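HBase's FSUtils.getRootDir() reads the configured root directory (HConstants.HBASE_DIR, i.e. hbase.rootdir), derives the filesystem from that path, and returns the qualified form. The same resolve-and-qualify pattern applies to any configured directory; a sketch, assuming a made-up property name and default value:

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class ConfiguredRootDir {
  // "my.app.rootdir" and its default are illustrative, not a real Hadoop or HBase key.
  public static Path getRootDir(Configuration conf) throws IOException {
    Path p = new Path(conf.get("my.app.rootdir", "/tmp/my-app"));
    FileSystem fs = p.getFileSystem(conf);  // filesystem implied by the path's scheme, else the default
    return fs.makeQualified(p);             // non-deprecated equivalent of p.makeQualified(fs)
  }

  public static void main(String[] args) throws IOException {
    System.out.println(getRootDir(new Configuration()));
  }
}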

                                   "partitions_" + UUID.randomUUID());
    LOG.info("Writing partition information to " + partitionsPath);

    FileSystem fs = partitionsPath.getFileSystem(conf);
    writePartitions(conf, partitionsPath, startKeys);
    partitionsPath.makeQualified(fs);
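    // Note: makeQualified() returns a new Path and leaves partitionsPath untouched,
    // so this call on its own has no effect; keeping the qualified form would require
    // something like partitionsPath = fs.makeQualified(partitionsPath).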

    URI cacheUri;
    try {
      // Below we make explicit reference to the bundled TOP.  It's cheating:
      // we assume the define in the hbase-bundled TOP is as it is in
View Full Code Here

    LOG.info("FileSystem: " + fs);
    try {
      if (rootRegionDir == null) {
        rootRegionDir = TEST_UTIL.getDataTestDir("HLogPerformanceEvaluation");
      }
      rootRegionDir = rootRegionDir.makeQualified(fs);
      cleanRegionRootDir(fs, rootRegionDir);
      // Initialize Table Descriptor
      HTableDescriptor htd = createHTableDescriptor(numFamilies);
      final long whenToRoll = roll;
      HLog hlog = new FSHLog(fs, rootRegionDir, "wals", getConf()) {
View Full Code Here
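The same WAL benchmark against a later HBase API: rootRegionDir is qualified exactly as before, but the log is created through FSHLog, which takes the qualified root directory and the WAL subdirectory name ("wals") as separate arguments instead of the pre-joined new Path(rootRegionDir, "wals") used in the earlier version.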

    List<StoreFile> outputStoreFiles = new ArrayList<StoreFile>(compactionOutputs.size());
    for (String compactionOutput : compactionOutputs) {
      //we should have this store file already
      boolean found = false;
      Path outputPath = new Path(fs.getStoreDir(family.getNameAsString()), compactionOutput);
      outputPath = outputPath.makeQualified(fs.getFileSystem());
      for (StoreFile sf : this.getStorefiles()) {
        if (sf.getPath().makeQualified(sf.getPath().getFileSystem(conf)).equals(outputPath)) {
          found = true;
          break;
        }
View Full Code Here
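From HBase's compaction-recovery code in Store: before a recovered compaction output name is matched against the current store files, both sides of the comparison are qualified, the candidate path through fs.getFileSystem() and each existing StoreFile's path through its own getFileSystem(conf), so equals() compares complete URIs rather than mixing qualified and unqualified forms.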

    }

    List<Path> inputPaths = new ArrayList<Path>(compactionInputs.size());
    for (String compactionInput : compactionInputs) {
      Path inputPath = new Path(fs.getStoreDir(family.getNameAsString()), compactionInput);
      inputPath = inputPath.makeQualified(fs.getFileSystem());
      inputPaths.add(inputPath);
    }

    //some of the input files might already be deleted
    List<StoreFile> inputStoreFiles = new ArrayList<StoreFile>(compactionInputs.size());
View Full Code Here
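The compaction input paths are qualified the same way before being collected. As the snippet's own comment notes, some of those files may already have been deleted; that is fine here because makeQualified() only rewrites the path (scheme, authority, working directory) and never checks that the file exists.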
