Examples of makeQualified()
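makeQualified() turns a relative or scheme-less Path into a fully qualified one: a missing scheme and authority are filled in from the filesystem's default URI, and relative paths are resolved against the working directory (for example, "input.dat" might become "hdfs://namenode:8020/user/alice/input.dat"). The snippets below show the method as it appears on FileContext, FileStatus, and FileSystem.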


Examples of org.apache.hadoop.fs.FileContext.makeQualified()

    synchronized (fileInfo) {
      Path historyFilePath = fileInfo.getHistoryFile();
      FSDataInputStream in = null;
      LOG.info("JobHistoryFile is: " + historyFilePath);
      try {
        in = fc.open(fc.makeQualified(historyFilePath));
      } catch (IOException ioe) {
        LOG.info("Can not open history file: " + historyFilePath, ioe);
        throw new Exception("Can not open History File");
      }
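A minimal, self-contained sketch of the same call, assuming the default filesystem from the local configuration (the method and file names here are hypothetical):

    import java.io.IOException;
    import org.apache.hadoop.fs.FSDataInputStream;
    import org.apache.hadoop.fs.FileContext;
    import org.apache.hadoop.fs.Path;

    // Qualify a bare path against the default filesystem, then open it.
    static void openQualified(String name) throws IOException {
      FileContext fc = FileContext.getFileContext();      // default FS from core-site.xml
      Path qualified = fc.makeQualified(new Path(name));  // fills in scheme, authority, cwd
      try (FSDataInputStream in = fc.open(qualified)) {
        // ... read from the stream
      }
    }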

Examples of org.apache.hadoop.fs.FileStatus.makeQualified()

    try {
      FileSystem fs = getFileSystem(path, false);
      FileStatus fi = fs.getFileStatus(new Path(path));

      if (fi != null) {
        fi.makeQualified(fs);
        return new DfsFileStatus(fi.getPath().toString(), fi.getLen(), fi.isDir(),
            fi.getModificationTime(), fi.getAccessTime());
      } else {
        throw new FsShellFileNotFoundException("File does not exist: " + path);
      }
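The snippet above relies on makeQualified(FileSystem) qualifying the status's path in place before fi.getPath() is read. FileStatus.makeQualified has changed signature across Hadoop releases, so a portable alternative is to qualify the Path directly; a minimal sketch (the helper name is hypothetical):

    import java.io.IOException;
    import org.apache.hadoop.fs.FileStatus;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    // Qualify the path carried by a FileStatus without depending on
    // a particular FileStatus.makeQualified() signature.
    static Path qualifiedPathOf(FileSystem fs, String name) throws IOException {
      FileStatus status = fs.getFileStatus(new Path(name));
      // Path.makeQualified(URI, Path) returns a new, fully qualified Path.
      return status.getPath().makeQualified(fs.getUri(), fs.getWorkingDirectory());
    }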

Examples of org.apache.hadoop.fs.FileSystem.makeQualified()

   * @return Fully qualified path for the default hbase root dir
   * @throws IOException
   */
  public Path getDefaultRootDirPath() throws IOException {
    FileSystem fs = FileSystem.get(this.conf);
    return new Path(fs.makeQualified(fs.getHomeDirectory()), "hbase");
  }

  /**
   * Creates an hbase rootdir in user home directory.  Also creates hbase
   * version file.  Normally you won't make use of this method.  Root hbasedir
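For reference, a minimal sketch of FileSystem.makeQualified() on its own, assuming the Configuration points at the intended filesystem (the helper name is hypothetical):

    import java.io.IOException;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    // Resolve a bare path name to a fully qualified Path, e.g.
    // "/user/alice" -> "hdfs://namenode:8020/user/alice".
    static Path qualify(Configuration conf, String name) throws IOException {
      FileSystem fs = FileSystem.get(conf);
      return fs.makeQualified(new Path(name));
    }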

Examples of org.apache.hadoop.fs.FileSystem.makeQualified()

    byte [] enabledTable = Bytes.toBytes("enabledTable");
    HTableDescriptor htdEnabled = new HTableDescriptor(enabledTable);
    htdEnabled.addFamily(new HColumnDescriptor(FAMILY));

    FileSystem filesystem = FileSystem.get(conf);
    Path rootdir = filesystem.makeQualified(
        new Path(conf.get(HConstants.HBASE_DIR)));
    // Write the .tableinfo
    FSTableDescriptors.createTableDescriptor(filesystem, rootdir, htdEnabled);

    HRegionInfo hriEnabled = new HRegionInfo(htdEnabled.getName(), null, null);

Examples of org.apache.hadoop.fs.FileSystem.makeQualified()

      // Create a ZKW to use in the test
      ZooKeeperWatcher zkw = HBaseTestingUtility.getZooKeeperWatcher(TEST_UTIL);

      FileSystem filesystem = FileSystem.get(conf);
      Path rootdir = filesystem.makeQualified(new Path(conf
          .get(HConstants.HBASE_DIR)));

      byte[][] SPLIT_KEYS = new byte[][] { new byte[0], Bytes.toBytes("aaa"),
          Bytes.toBytes("bbb"), Bytes.toBytes("ccc"), Bytes.toBytes("ddd") };
      HTableDescriptor htdDisabled = new HTableDescriptor(Bytes.toBytes(table));

Examples of org.apache.hadoop.fs.FileSystem.makeQualified()

      throws IOException {

    // create the partitions file
    FileSystem fs = FileSystem.get(job.getConfiguration());
    Path partitionsPath = new Path("/tmp", "partitions_" + UUID.randomUUID());
    // makeQualified returns a new, fully qualified Path; it does not mutate its argument
    partitionsPath = fs.makeQualified(partitionsPath);
    fs.deleteOnExit(partitionsPath);
    writePartitions(job.getConfiguration(), partitionsPath, splitPoints);

    // configure job to use it
    job.setPartitionerClass(TotalOrderPartitioner.class);
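FileSystem.makeQualified() never modifies the Path it is given; it returns a new, fully qualified Path, which is why the result has to be assigned back to partitionsPath before it is passed to deleteOnExit() and writePartitions().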

Examples of org.apache.hadoop.fs.FileSystem.makeQualified()

    // Check the output specification
    job.getOutputFormat().checkOutputSpecs(fs, job);

    // Create the splits for the job
    LOG.debug("Creating splits at " + fs.makeQualified(submitSplitFile));
    InputSplit[] splits =
      job.getInputFormat().getSplits(job, job.getNumMapTasks());
    // sort the splits into order based on size, so that the biggest
    // go first
    Arrays.sort(splits, new Comparator<InputSplit>() {

Examples of org.apache.hadoop.fs.FileSystem.makeQualified()

    String table = args[args.length - 1];
    Configuration conf = new Configuration(util.getConfiguration());

    // populate input file
    FileSystem fs = FileSystem.get(conf);
    Path inputPath = fs.makeQualified(new Path(util.getDataTestDirOnTestFS(table), "input.dat"));
    FSDataOutputStream op = fs.create(inputPath, true);
    if (data == null) {
      data = "KEY\u001bVALUE1\u001bVALUE2\n";
    }
    op.write(Bytes.toBytes(data));

Examples of org.apache.hadoop.fs.FileSystem.makeQualified()

      assertTrue(file1 + " should be a file", !status.isDir());
      assertTrue(status.getBlockSize() == blockSize);
      assertTrue(status.getReplication() == 1);
      assertTrue(status.getLen() == fileSize);
      assertEquals(fs.makeQualified(file1).toString(),
          status.getPath().toString());

      // test getVisibleLength
      DFSDataInputStream fin = (DFSDataInputStream)fs.open(file1);
      assertEquals(status.getLen(), fin.getVisibleLength());