Package org.apache.hadoop.fs

Examples of org.apache.hadoop.fs.FileSystem$Cache$Key
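Before the examples: FileSystem.Cache.Key is the internal key under which FileSystem#get(URI, Configuration) caches client instances; it combines the URI scheme and authority with the calling UserGroupInformation. A minimal sketch of that caching behavior (the namenode address is a placeholder, and the per-scheme fs.hdfs.impl.disable.cache property should be checked against your Hadoop version):

import java.net.URI;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;

public class FsCacheSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    URI uri = URI.create("hdfs://namenode:8020/");  // placeholder address

    // Same scheme + authority + caller => same Cache.Key => same instance.
    FileSystem a = FileSystem.get(uri, conf);
    FileSystem b = FileSystem.get(uri, conf);
    System.out.println(a == b);  // true: second call is a cache hit

    // newInstance skips the cache and returns a private client.
    FileSystem c = FileSystem.newInstance(uri, conf);
    System.out.println(a == c);  // false
    c.close();                   // safe: nothing else shares this instance

    // Disabling the cache for a scheme makes every get() build a new client.
    conf.setBoolean("fs.hdfs.impl.disable.cache", true);
    FileSystem d = FileSystem.get(uri, conf);
    System.out.println(a == d);  // false
    d.close();
  }
}

Because cached instances are shared, closing one obtained from FileSystem.get can break other code holding the same key, which is the usual reason to reach for newInstance.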


    // Split the query into numSplits contiguous key ranges using scatter keys.
    validateQuery(query);
    validateSplitSize(numSplits);

    List<Query> splits = new ArrayList<Query>(numSplits);
    List<Key> scatterKeys = getScatterKeys(numSplits, query, datastore);
    Key lastKey = null;
    for (Key nextKey : getSplitKey(scatterKeys, numSplits)) {
      splits.add(createSplit(lastKey, nextKey, query));
      lastKey = nextKey;
    }
    splits.add(createSplit(lastKey, null, query));
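The loop above is an instance of a general pattern: walk a sorted list of split points and emit one range per adjacent pair, with null standing in for the open ends, so N split points yield N + 1 contiguous ranges. A self-contained sketch of that pattern (the Range class and method names here are hypothetical, not part of the Datastore API):

import java.util.ArrayList;
import java.util.List;

final class Range<K> {
  final K startInclusive;  // null means "from the beginning"
  final K endExclusive;    // null means "to the end"
  Range(K start, K end) { this.startInclusive = start; this.endExclusive = end; }
}

final class Splitter {
  /** Turns N sorted split points into N + 1 contiguous, non-overlapping ranges. */
  static <K> List<Range<K>> toRanges(List<K> sortedSplitPoints) {
    List<Range<K>> ranges = new ArrayList<>(sortedSplitPoints.size() + 1);
    K last = null;                        // open start for the first range
    for (K next : sortedSplitPoints) {
      ranges.add(new Range<>(last, next));
      last = next;
    }
    ranges.add(new Range<>(last, null));  // open end for the final range
    return ranges;
  }
}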


    }
  }

  private void splitStoreFile(final StoreFile sf, final Path splitdir)
  throws IOException {
    FileSystem fs = this.parent.getFilesystem();
    byte [] family = sf.getFamily();
    String encoded = this.hri_a.getEncodedName();
    Path storedir = Store.getStoreHomedir(splitdir, encoded, family);
    StoreFile.split(fs, storedir, sf, this.splitrow, Range.bottom);
    encoded = this.hri_b.getEncodedName();

   */
  HRegion createDaughterRegion(final HRegionInfo hri,
      final RegionServerServices rsServices)
  throws IOException {
    // Package private so unit tests have access.
    FileSystem fs = this.parent.getFilesystem();
    Path regionDir = getSplitDirForDaughter(fs, this.splitdir, hri);
    HRegion r = HRegion.newHRegion(this.parent.getTableDir(),
      this.parent.getLog(), fs, this.parent.getConf(),
      hri, this.parent.getTableDesc(), rsServices);

   * of no return and so now need to abort the server to minimize damage.
   */
  public boolean rollback(final Server server, final RegionServerServices services)
  throws IOException {
    boolean result = true;
    FileSystem fs = this.parent.getFilesystem();
    ListIterator<JournalEntry> iterator =
      this.journal.listIterator(this.journal.size());
    // Iterate in reverse.
    while (iterator.hasPrevious()) {
      JournalEntry je = iterator.previous();

   * @param r
   * @throws IOException
   */
  static void cleanupAnySplitDetritus(final HRegion r) throws IOException {
    Path splitdir = getSplitDir(r);
    FileSystem fs = r.getFilesystem();
    if (!fs.exists(splitdir)) return;
    // Look at the splitdir.  It could have the encoded names of the daughter
    // regions we tried to make.  See if the daughter regions actually got made
    // out under the tabledir.  If here under splitdir still, then the split did
    // not complete.  Try and do cleanup.  This code WILL NOT catch the case
    // where we successfully created daughter a but regionserver crashed during
    // the creation of region b.  In this case, there'll be an orphan daughter
    // dir in the filesystem.  TODO: Fix.
    FileStatus [] daughters = fs.listStatus(splitdir, new FSUtils.DirFilter(fs));
    for (int i = 0; i < daughters.length; i++) {
      cleanupDaughterRegion(fs, r.getTableDir(),
        daughters[i].getPath().getName());
    }
    cleanupSplitDir(fs, splitdir);
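FSUtils.DirFilter above is an org.apache.hadoop.fs.PathFilter that keeps only directories in a listing. A minimal sketch of an equivalent filter, not the HBase implementation itself:

import java.io.IOException;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.PathFilter;

/** Accepts only paths that resolve to directories; errors count as "not a directory". */
final class DirectoriesOnlyFilter implements PathFilter {
  private final FileSystem fs;

  DirectoriesOnlyFilter(FileSystem fs) { this.fs = fs; }

  @Override
  public boolean accept(Path path) {
    try {
      return fs.getFileStatus(path).isDirectory();
    } catch (IOException e) {
      return false;  // unreadable entries are skipped rather than failing the listing
    }
  }
}

It would be used exactly like the HBase filter: fs.listStatus(splitdir, new DirectoriesOnlyFilter(fs)).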

  private static boolean resolveAndArchiveFile(Path archiveDir, File currentFile,
      String archiveStartTime) throws IOException {
    // build path as it should be in the archive
    String filename = currentFile.getName();
    Path archiveFile = new Path(archiveDir, filename);
    FileSystem fs = currentFile.getFileSystem();

    // If the file already exists in the archive, move the existing one to a
    // timestamped backup. Getting the same name as the existing file is a
    // really, really unlikely situation, but we cover that one-in-a-trillion chance.
    if (fs.exists(archiveFile)) {
      if (LOG.isDebugEnabled()) {
        LOG.debug("File:" + archiveFile + " already exists in archive, moving to "
            + "timestamped backup and overwriting current.");
      }

      // move the archive file to the stamped backup
      Path backedupArchiveFile = new Path(archiveDir, filename + SEPARATOR + archiveStartTime);
      if (!fs.rename(archiveFile, backedupArchiveFile)) {
        LOG.error("Could not rename archive file to backup: " + backedupArchiveFile
            + ", deleting existing file in favor of newer.");
        // try to delete the existing file if we can't rename it
        if (!fs.delete(archiveFile, false)) {
          throw new IOException("Couldn't delete existing archive file (" + archiveFile
              + ") or rename it to the backup file (" + backedupArchiveFile
              + ") to make room for similarly named file.");
        }
      }
      LOG.debug("Backed up archive file from: " + archiveFile);
    }

    LOG.debug("No existing file in archive for:" + archiveFile +
        ", free to archive original file.");

    // at this point, we should have a free spot for the archive file
    boolean success = false;
    for (int i = 0; !success && i < DEFAULT_RETRIES_NUMBER; ++i) {
      if (i > 0) {
        // Ensure that the archive directory exists.
        // The previous "move to archive" operation has failed probably because
        // the cleaner has removed our archive directory (HBASE-7643).
        // (we're in a retry loop, so don't worry too much about the exception)
        try {
          if (!fs.exists(archiveDir)) {
            if (fs.mkdirs(archiveDir)) {
              LOG.debug("Created archive directory:" + archiveDir);
            }
          }
        } catch (IOException e) {
          LOG.warn("Failed to create the archive directory: " + archiveDir, e);

        if (overlapsToSideline > maxOverlapsToSideline) {
          overlapsToSideline = maxOverlapsToSideline;
        }
        List<HbckInfo> regionsToSideline =
          RegionSplitCalculator.findBigRanges(bigOverlap, overlapsToSideline);
        FileSystem fs = FileSystem.get(conf);
        for (HbckInfo regionToSideline: regionsToSideline) {
          try {
            LOG.info("Closing region: " + regionToSideline);
            closeRegion(regionToSideline);
          } catch (IOException ioe) {

  @Deprecated
  public HTableDescriptor getTableDesc() {
    Configuration c = HBaseConfiguration.create();
    c.set("fs.defaultFS", c.get(HConstants.HBASE_DIR));
    c.set("fs.default.name", c.get(HConstants.HBASE_DIR));
    FileSystem fs;
    try {
      fs = FileSystem.get(c);
    } catch (IOException e) {
      throw new RuntimeException(e);
    }

   * @deprecated Do not use; expensive call
   */
  @Deprecated
  public void setTableDesc(HTableDescriptor newDesc) {
    Configuration c = HBaseConfiguration.create();
    FileSystem fs;
    try {
      fs = FileSystem.get(c);
    } catch (IOException e) {
      throw new RuntimeException(e);
    }
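The deprecation warnings on getTableDesc and setTableDesc exist because each call builds a fresh HBaseConfiguration and performs a FileSystem lookup. A minimal sketch of the cheaper shape, assuming the caller can construct once and reuse; the helper class and its file layout are illustrative only, not the HBase API:

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

/** Holds one Configuration and one FileSystem instead of rebuilding them per call. */
final class TableDescriptorReader {  // hypothetical helper, for illustration only
  private final FileSystem fs;
  private final Path rootDir;

  TableDescriptorReader(Configuration conf) throws IOException {
    this.fs = FileSystem.get(conf);  // one cache lookup, reused afterwards
    this.rootDir = new Path(conf.get("hbase.rootdir"));  // assumes hbase.rootdir is set
  }

  /** Checks for a per-table directory under the root; the layout is illustrative. */
  boolean tableDirExists(String tableName) throws IOException {
    return fs.exists(new Path(rootDir, tableName));
  }
}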

   * and the number of files to copy.
   */
  private static Path[] createInputFiles(final Configuration conf,
      final List<Pair<Path, Long>> snapshotFiles, int mappers)
      throws IOException, InterruptedException {
    FileSystem fs = FileSystem.get(conf);
    Path inputFolderPath = getInputFolderPath(fs, conf);
    LOG.debug("Input folder location: " + inputFolderPath);

    List<List<Path>> splits = getBalancedSplits(snapshotFiles, mappers);
    Path[] inputFiles = new Path[splits.size()];


