Package org.apache.hadoop.fs

Examples of org.apache.hadoop.fs.FileSystem$Cache$Key
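
FileSystem.Cache.Key is the internal key under which FileSystem.get(...) caches client instances: it is built from the URI scheme, the URI authority and the calling UserGroupInformation. That is why the snippets below can call FileSystem.get(conf) or path.getFileSystem(conf) repeatedly without opening a new connection each time. The following is a minimal sketch of that caching behaviour; the class name FileSystemCacheDemo is a placeholder, and it assumes a default filesystem configured in core-site.xml.

  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.fs.FileSystem;

  public class FileSystemCacheDemo {
    public static void main(String[] args) throws Exception {
      Configuration conf = new Configuration();

      // Same scheme, authority and UGI -> same Cache.Key -> same cached instance.
      FileSystem a = FileSystem.get(conf);
      FileSystem b = FileSystem.get(FileSystem.getDefaultUri(conf), conf);
      System.out.println("same cached instance: " + (a == b));

      // newInstance() deliberately bypasses the cache; the caller must close it.
      FileSystem c = FileSystem.newInstance(conf);
      System.out.println("same as newInstance: " + (a == c));
      c.close();
    }
  }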


      printUsageAndExit();
    }

    Configuration conf = getConf();
    Path inputRoot = FSUtils.getRootDir(conf);
    FileSystem inputFs = FileSystem.get(conf);
    FileSystem outputFs = FileSystem.get(outputRoot.toUri(), conf);

    Path snapshotDir = SnapshotDescriptionUtils.getCompletedSnapshotDir(snapshotName, inputRoot);
    Path snapshotTmpDir = SnapshotDescriptionUtils.getWorkingSnapshotDir(snapshotName, outputRoot);
    Path outputSnapshotDir = SnapshotDescriptionUtils.getCompletedSnapshotDir(snapshotName, outputRoot);

    // Check if the snapshot already exists
    if (outputFs.exists(outputSnapshotDir)) {
      System.err.println("The snapshot '" + snapshotName +
        "' already exists in the destination: " + outputSnapshotDir);
      return 1;
    }

    // Check if the snapshot is already in progress
    if (outputFs.exists(snapshotTmpDir)) {
      System.err.println("A snapshot with the same name '" + snapshotName + "' is in-progress");
      return 1;
    }

    // Step 0 - Extract snapshot files to copy
    final List<Pair<Path, Long>> files = getSnapshotFiles(inputFs, snapshotDir);

    // Step 1 - Copy fs1:/.snapshot/<snapshot> to fs2:/.snapshot/.tmp/<snapshot>
    // The snapshot references must be copied before the hfiles, otherwise the cleaner
    // will remove them because they are unreferenced.
    try {
      FileUtil.copy(inputFs, snapshotDir, outputFs, snapshotTmpDir, false, false, conf);
    } catch (IOException e) {
      System.err.println("Failed to copy the snapshot directory: from=" + snapshotDir +
        " to=" + snapshotTmpDir);
      e.printStackTrace(System.err);
      return 1;
    }

    // Step 2 - Start MR Job to copy files
    // The snapshot references must be copied before the files, otherwise the files get removed
    // by the HFileArchiver, since they have no references.
    try {
      if (!runCopyJob(inputRoot, outputRoot, files, verifyChecksum,
          filesUser, filesGroup, filesMode, mappers)) {
        throw new ExportSnapshotException("Snapshot export failed!");
      }

      // Step 3 - Rename fs2:/.snapshot/.tmp/<snapshot> to fs2:/.snapshot/<snapshot>
      if (!outputFs.rename(snapshotTmpDir, outputSnapshotDir)) {
        System.err.println("Snapshot export failed!");
        System.err.println("Unable to rename snapshot directory from=" +
                           snapshotTmpDir + " to=" + outputSnapshotDir);
        return 1;
      }

      return 0;
    } catch (Exception e) {
      System.err.println("Snapshot export failed!");
      e.printStackTrace(System.err);
      outputFs.delete(outputSnapshotDir, true);
      return 1;
    }
  }
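
The export above obtains separate FileSystem handles for the source and destination URIs and hands both to FileUtil.copy, so the snapshot metadata lands on the remote cluster before the data files are shipped. A standalone sketch of that cross-filesystem copy follows; the cluster addresses and paths are placeholders.

  import java.io.IOException;
  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.fs.FileSystem;
  import org.apache.hadoop.fs.FileUtil;
  import org.apache.hadoop.fs.Path;

  public class CrossFsCopy {
    public static void main(String[] args) throws IOException {
      Configuration conf = new Configuration();
      Path src = new Path("hdfs://cluster-a:8020/data/snapshot-x");     // placeholder
      Path dst = new Path("hdfs://cluster-b:8020/staging/snapshot-x");  // placeholder

      // Each Path resolves its own FileSystem from its URI scheme and authority.
      FileSystem srcFs = src.getFileSystem(conf);
      FileSystem dstFs = dst.getFileSystem(conf);

      // deleteSource=false, overwrite=false: refuse to clobber existing data.
      if (!FileUtil.copy(srcFs, src, dstFs, dst, false, false, conf)) {
        throw new IOException("Copy failed: " + src + " -> " + dst);
      }
    }
  }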


   * Deletes a table's directory from the file system if it exists. Used in unit
   * tests.
   */
  public static void deleteTableDescriptorIfExists(String tableName,
      Configuration conf) throws IOException {
    FileSystem fs = FSUtils.getCurrentFileSystem(conf);
    FileStatus status = getTableInfoPath(fs, FSUtils.getRootDir(conf), tableName);
    // The deleteDirectory call below works for either a file or a directory.
    if (status != null && fs.exists(status.getPath())) {
      FSUtils.deleteDirectory(fs, status.getPath());
    }
  }
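
The helper above is the usual "delete if present" pattern: resolve the path, check exists(), then delete recursively. Stripped of the HBase-specific table-descriptor lookup, the pattern is roughly the sketch below; the path is a placeholder.

  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.fs.FileSystem;
  import org.apache.hadoop.fs.Path;

  public class DeleteIfExists {
    public static void main(String[] args) throws Exception {
      Configuration conf = new Configuration();
      FileSystem fs = FileSystem.get(conf);
      Path dir = new Path("/tmp/example-table");   // placeholder path
      if (fs.exists(dir)) {
        // The second argument enables recursive deletion, so this works
        // for a single file as well as for a whole directory tree.
        fs.delete(dir, true);
      }
    }
  }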

   * @param forceCreation True if we are to overwrite an existing file.
   */
  static boolean createTableDescriptor(final HTableDescriptor htableDescriptor,
      final Configuration conf, boolean forceCreation)
  throws IOException {
    FileSystem fs = FSUtils.getCurrentFileSystem(conf);
    return createTableDescriptor(fs, FSUtils.getRootDir(conf), htableDescriptor,
        forceCreation);
  }

    System.err.println("         For example: HLog --split hdfs://example.com:9000/hbase/.logs/DIR");
  }

  private static void split(final Configuration conf, final Path p)
  throws IOException {
    FileSystem fs = FileSystem.get(conf);
    if (!fs.exists(p)) {
      throw new FileNotFoundException(p.toString());
    }
    final Path baseDir = new Path(conf.get(HConstants.HBASE_DIR));
    final Path oldLogDir = new Path(baseDir, HConstants.HREGION_OLDLOGDIR_NAME);
    if (!fs.getFileStatus(p).isDir()) {
      throw new IOException(p + " is not a directory");
    }

    HLogSplitter logSplitter = HLogSplitter.createLogSplitter(
        conf, baseDir, p, oldLogDir, fs);
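
Before splitting, the code checks that the path exists and is a directory. The same validation in isolation, using the newer FileStatus.isDirectory() accessor (the snippet uses the older isDir()), might look like this; the log directory path is a placeholder.

  import java.io.FileNotFoundException;
  import java.io.IOException;
  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.fs.FileSystem;
  import org.apache.hadoop.fs.Path;

  public class RequireDirectory {
    static void requireDirectory(FileSystem fs, Path p) throws IOException {
      if (!fs.exists(p)) {
        throw new FileNotFoundException(p.toString());
      }
      if (!fs.getFileStatus(p).isDirectory()) {
        throw new IOException(p + " is not a directory");
      }
    }

    public static void main(String[] args) throws IOException {
      Configuration conf = new Configuration();
      Path logDir = new Path("/hbase/.logs/example");   // placeholder
      requireDirectory(logDir.getFileSystem(conf), logDir);
    }
  }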

   */
  void bulkLoadHFile(String srcPathStr) throws IOException {
    Path srcPath = new Path(srcPathStr);

    // Move the file if it's on another filesystem
    FileSystem srcFs = srcPath.getFileSystem(conf);
    FileSystem desFs = fs instanceof HFileSystem ? ((HFileSystem)fs).getBackingFs() : fs;
    // We can't compare FileSystem instances as equals() includes the UGI
    // instance as part of the comparison, and that won't work when doing
    // SecureBulkLoad.
    // TODO: deal with viewFS
    if (!srcFs.getUri().equals(desFs.getUri())) {
      LOG.info("File " + srcPath + " on different filesystem than " +
          "destination store - moving to this filesystem.");
      Path tmpPath = getTmpPath();
      FileUtil.copy(srcFs, srcPath, fs, tmpPath, false, conf);
      LOG.info("Copied to temporary path on dst filesystem: " + tmpPath);

   * will be responsible for adding the regions returned by this method to META and doing the assignment.
   */
  @Override
  protected List<HRegionInfo> handleCreateHdfsRegions(final Path tableRootDir, final String tableName)
      throws IOException {
    FileSystem fs = fileSystemManager.getFileSystem();
    Path rootDir = fileSystemManager.getRootDir();
    Path tableDir = new Path(tableRootDir, tableName);

    try {
      // 1. Execute the on-disk Clone

  public void setConf(Configuration conf) {
    super.setConf(conf);
    try {
      long cacheRefreshPeriod = conf.getLong(HFILE_CACHE_REFRESH_PERIOD_CONF_KEY,
        DEFAULT_HFILE_CACHE_REFRESH_PERIOD);
      final FileSystem fs = FSUtils.getCurrentFileSystem(conf);
      Path rootDir = FSUtils.getRootDir(conf);
      cache = new SnapshotFileCache(fs, rootDir, cacheRefreshPeriod, cacheRefreshPeriod,
          "snapshot-hfile-cleaner-cache-refresher", new SnapshotFileCache.SnapshotFileInspector() {
            public Collection<String> filesUnderSnapshot(final Path snapshotDir)
                throws IOException {

  public void setConf(Configuration conf) {
    super.setConf(conf);
    try {
      long cacheRefreshPeriod = conf.getLong(
        HLOG_CACHE_REFRESH_PERIOD_CONF_KEY, DEFAULT_HLOG_CACHE_REFRESH_PERIOD);
      final FileSystem fs = FSUtils.getCurrentFileSystem(conf);
      Path rootDir = FSUtils.getRootDir(conf);
      cache = new SnapshotFileCache(fs, rootDir, cacheRefreshPeriod, cacheRefreshPeriod,
          "snapshot-log-cleaner-cache-refresher", new SnapshotFileCache.SnapshotFileInspector() {
            public Collection<String> filesUnderSnapshot(final Path snapshotDir)
                throws IOException {
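
Both cleaners do the same plumbing before building their SnapshotFileCache: read a refresh period from the Configuration with a fallback default and resolve the FileSystem that owns the HBase root directory. Without the FSUtils helpers, that plumbing is roughly the sketch below; the refresh-period key and the default root path are illustrative, and hbase.rootdir is the key those helpers ultimately read.

  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.fs.FileSystem;
  import org.apache.hadoop.fs.Path;

  public class ResolveRootDir {
    public static void main(String[] args) throws Exception {
      Configuration conf = new Configuration();

      // Refresh period in milliseconds with a fallback default; the key name is illustrative.
      long refreshPeriod = conf.getLong("example.cache.refresh.period", 300000L);

      // The relevant FileSystem is the one that owns the root directory,
      // which is not necessarily the one named by fs.defaultFS.
      Path rootDir = new Path(conf.get("hbase.rootdir", "/hbase"));   // default is a placeholder
      FileSystem fs = rootDir.getFileSystem(conf);

      System.out.println("root=" + fs.makeQualified(rootDir) + " refresh=" + refreshPeriod);
    }
  }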

   * the DistributedCache.
   */
  public static void addDependencyJars(Configuration conf,
      Class... classes) throws IOException {

    FileSystem localFs = FileSystem.getLocal(conf);

    Set<String> jars = new HashSet<String>();

    // Add jars that are already in the tmpjars variable
    jars.addAll(conf.getStringCollection("tmpjars"));

    // Add jars containing the specified classes
    for (Class clazz : classes) {
      if (clazz == null) continue;

      String pathStr = findOrCreateJar(clazz);
      if (pathStr == null) {
        LOG.warn("Could not find jar for class " + clazz +
                 " in order to ship it to the cluster.");
        continue;
      }
      Path path = new Path(pathStr);
      if (!localFs.exists(path)) {
        LOG.warn("Could not validate jar file " + path + " for class "
                 + clazz);
        continue;
      }
      jars.add(path.makeQualified(localFs).toString());
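
addDependencyJars validates each jar against the local filesystem and then stores a fully qualified URI in tmpjars, so the string stays unambiguous once it reaches the cluster. A minimal sketch of that check-and-qualify step; the jar path is a placeholder, and FileSystem.makeQualified(Path) stands in for the deprecated Path.makeQualified(FileSystem) used above.

  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.fs.FileSystem;
  import org.apache.hadoop.fs.LocalFileSystem;
  import org.apache.hadoop.fs.Path;

  public class QualifyLocalJar {
    public static void main(String[] args) throws Exception {
      Configuration conf = new Configuration();
      LocalFileSystem localFs = FileSystem.getLocal(conf);

      Path jar = new Path("/opt/libs/example.jar");   // placeholder jar path
      if (!localFs.exists(jar)) {
        System.err.println("Could not validate jar file " + jar);
        return;
      }
      // Qualify with scheme and authority (e.g. file:/opt/libs/example.jar)
      // so the path is still meaningful outside this process.
      System.out.println(localFs.makeQualified(jar));
    }
  }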

  public static void debugLsr(Configuration conf,
      Path p, ErrorReporter errors) throws IOException {
    if (!LOG.isDebugEnabled() || p == null) {
      return;
    }
    FileSystem fs = p.getFileSystem(conf);

    if (!fs.exists(p)) {
      // nothing
      return;
    }
    errors.print(p.toString());

    if (fs.isFile(p)) {
      return;
    }

    if (fs.getFileStatus(p).isDir()) {
      FileStatus[] fss = fs.listStatus(p);
      for (FileStatus status : fss) {
        debugLsr(conf, status.getPath(), errors);
      }
    }
  }
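
debugLsr is a depth-first walk: print the path, stop at plain files, and recurse through listStatus() for directories. The same walk detached from the fsck error reporter looks roughly like this; output goes to stdout and the start path is a placeholder.

  import java.io.IOException;
  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.fs.FileStatus;
  import org.apache.hadoop.fs.FileSystem;
  import org.apache.hadoop.fs.Path;

  public class Lsr {
    static void lsr(FileSystem fs, Path p) throws IOException {
      System.out.println(p);
      if (fs.getFileStatus(p).isDirectory()) {
        for (FileStatus child : fs.listStatus(p)) {
          lsr(fs, child.getPath());
        }
      }
    }

    public static void main(String[] args) throws IOException {
      Configuration conf = new Configuration();
      Path start = new Path("/hbase");   // placeholder start path
      FileSystem fs = start.getFileSystem(conf);
      if (fs.exists(start)) {
        lsr(fs, start);
      }
    }
  }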
