Package org.apache.hadoop.fs

Examples of org.apache.hadoop.fs.FileSystem$Cache
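
FileSystem.Cache is the static, process-wide cache inside org.apache.hadoop.fs.FileSystem. FileSystem.get(...) and Path.getFileSystem(...) look it up keyed by URI scheme, authority and the calling user, so repeated calls against the same configuration hand back the same shared instance; FileSystem.newInstance(...) bypasses it, and caching can be switched off per scheme with fs.<scheme>.impl.disable.cache. Each excerpt below is truncated at both ends. First, a minimal sketch of the cache's observable behavior (the class name FsCacheDemo is made up for illustration):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;

    public class FsCacheDemo {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();

        // get() consults FileSystem.Cache: the same scheme, authority and
        // user yield the same shared instance.
        FileSystem a = FileSystem.get(conf);
        FileSystem b = FileSystem.get(conf);
        System.out.println(a == b);   // true: both references come from the cache

        // newInstance() deliberately bypasses the cache, so the result can
        // be closed without affecting other callers.
        FileSystem c = FileSystem.newInstance(conf);
        System.out.println(a == c);   // false: a fresh, uncached instance
        c.close();                    // safe: the cached instance stays open

        // Closing a cached instance, by contrast, closes it for every
        // caller that obtained it through get().
      }
    }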


    String regionEncodedName) throws IOException {
    HDFSBlocksDistribution hdfsBlocksDistribution =
      new HDFSBlocksDistribution();
    Path tablePath = FSUtils.getTablePath(FSUtils.getRootDir(conf),
      tableDescriptor.getName());
    FileSystem fs = tablePath.getFileSystem(conf);

    for (HColumnDescriptor family : tableDescriptor.getFamilies()) {
      Path storeHomeDir = Store.getStoreHomedir(tablePath, regionEncodedName,
          family.getName());
      if (!fs.exists(storeHomeDir)) continue;

      FileStatus[] hfilesStatus = fs.listStatus(storeHomeDir);

      for (FileStatus hfileStatus : hfilesStatus) {
        HDFSBlocksDistribution storeFileBlocksDistribution =
          FSUtils.computeHDFSBlocksDistribution(fs, hfileStatus, 0,
          hfileStatus.getLen());
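The excerpt above appears to come from HBase's region-locality computation; FSUtils.computeHDFSBlocksDistribution ultimately rests on FileSystem.getFileBlockLocations. A minimal sketch of the same idea under that assumption (BlockWeights and bytesPerHost are hypothetical names):

    import java.io.IOException;
    import java.util.HashMap;
    import java.util.Map;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.BlockLocation;
    import org.apache.hadoop.fs.FileStatus;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class BlockWeights {
      /** Sums, per datanode host, how many bytes of the file it serves. */
      public static Map<String, Long> bytesPerHost(FileSystem fs, Path file)
          throws IOException {
        FileStatus status = fs.getFileStatus(file);
        BlockLocation[] blocks =
            fs.getFileBlockLocations(status, 0, status.getLen());
        Map<String, Long> weights = new HashMap<String, Long>();
        for (BlockLocation block : blocks) {
          for (String host : block.getHosts()) {
            Long soFar = weights.get(host);
            weights.put(host, (soFar == null ? 0L : soFar) + block.getLength());
          }
        }
        return weights;
      }

      public static void main(String[] args) throws IOException {
        FileSystem fs = FileSystem.get(new Configuration()); // cached instance
        System.out.println(bytesPerHost(fs, new Path(args[0])));
      }
    }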


        " Table name == " + info.getTableNameAsString());

    Path tableDir =
        HTableDescriptor.getTableDir(rootDir, info.getTableName());
    Path regionDir = HRegion.getRegionDir(tableDir, info.getEncodedName());
    FileSystem fs = FileSystem.get(conf);
    fs.mkdirs(regionDir);
    // Write HRI to a file in case we need to recover .META.
    writeRegioninfoOnFilesystem(info, regionDir, fs, conf);
    HLog effectiveHLog = hlog;
    if (hlog == null && !ignoreHLog) {
      effectiveHLog = new HLog(fs, new Path(regionDir, HConstants.HREGION_LOGDIR_NAME),
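The excerpt above (apparently HBase's region-creation path) pairs mkdirs with writing a metadata file into the new directory. A compact sketch of that create-then-persist shape; the paths and file content here are made up:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FSDataOutputStream;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class RegionDirSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        FileSystem fs = FileSystem.get(conf);  // shared, cached instance

        Path regionDir = new Path("/tmp/demo-table/region-0");
        fs.mkdirs(regionDir);                  // creates missing parents too

        // Persist a small metadata file inside the new directory.
        FSDataOutputStream out =
            fs.create(new Path(regionDir, ".regioninfo"), true); // overwrite
        try {
          out.writeUTF("serialized region info would go here");
        } finally {
          out.close();
        }
      }
    }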

    if (info == null) {
      throw new NullPointerException("Passed region info is null");
    }
    Path dir = HTableDescriptor.getTableDir(FSUtils.getRootDir(conf),
      info.getTableName());
    FileSystem fs = null;
    if (rsServices != null) {
      fs = rsServices.getFileSystem();
    }
    if (fs == null) {
      fs = FileSystem.get(conf);
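The null-fallback above is cheap precisely because of the cache: when the region-server services cannot supply a FileSystem, FileSystem.get normally hands back the instance the process already holds. A sketch of the pattern (resolveFs is a hypothetical helper):

    import java.io.IOException;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;

    final class FsFallback {
      private FsFallback() {}

      /** Prefer the caller-supplied handle; otherwise use the cached one. */
      static FileSystem resolveFs(FileSystem provided, Configuration conf)
          throws IOException {
        // Not an expensive open: get() usually just returns the instance
        // already sitting in FileSystem.Cache for this scheme/authority/user.
        return provided != null ? provided : FileSystem.get(conf);
      }
    }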

    if (!a.getRegionInfo().getTableNameAsString().equals(
        b.getRegionInfo().getTableNameAsString())) {
      throw new IOException("Regions do not belong to the same table");
    }

    FileSystem fs = a.getFilesystem();

    // Make sure each region's cache is empty

    a.flushcache();
    b.flushcache();

    // Compact each region so we only have one store file per family

    a.compactStores(true);
    if (LOG.isDebugEnabled()) {
      LOG.debug("Files for region: " + a);
      listPaths(fs, a.getRegionDir());
    }
    b.compactStores(true);
    if (LOG.isDebugEnabled()) {
      LOG.debug("Files for region: " + b);
      listPaths(fs, b.getRegionDir());
    }

    Configuration conf = a.getConf();
    HTableDescriptor tabledesc = a.getTableDesc();
    HLog log = a.getLog();
    Path tableDir = a.getTableDir();
    // Presume both regions are of the same type -- i.e. both user or both
    // catalog table regions -- so either one's comparator can be used.
    final byte[] startKey =
      (a.comparator.matchingRows(a.getStartKey(), 0, a.getStartKey().length,
           HConstants.EMPTY_BYTE_ARRAY, 0, HConstants.EMPTY_BYTE_ARRAY.length)
       || b.comparator.matchingRows(b.getStartKey(), 0,
              b.getStartKey().length, HConstants.EMPTY_BYTE_ARRAY, 0,
              HConstants.EMPTY_BYTE_ARRAY.length))
      ? HConstants.EMPTY_BYTE_ARRAY
      : (a.comparator.compareRows(a.getStartKey(), 0, a.getStartKey().length,
             b.getStartKey(), 0, b.getStartKey().length) <= 0
         ? a.getStartKey()
         : b.getStartKey());
    final byte[] endKey =
      (a.comparator.matchingRows(a.getEndKey(), 0, a.getEndKey().length,
           HConstants.EMPTY_BYTE_ARRAY, 0, HConstants.EMPTY_BYTE_ARRAY.length)
       || a.comparator.matchingRows(b.getEndKey(), 0, b.getEndKey().length,
              HConstants.EMPTY_BYTE_ARRAY, 0,
              HConstants.EMPTY_BYTE_ARRAY.length))
      ? HConstants.EMPTY_BYTE_ARRAY
      : (a.comparator.compareRows(a.getEndKey(), 0, a.getEndKey().length,
             b.getEndKey(), 0, b.getEndKey().length) <= 0
         ? b.getEndKey()
         : a.getEndKey());

    HRegionInfo newRegionInfo =
        new HRegionInfo(tabledesc.getName(), startKey, endKey);
    LOG.info("Creating new region " + newRegionInfo.toString());
    String encodedName = newRegionInfo.getEncodedName();
    Path newRegionDir = HRegion.getRegionDir(a.getTableDir(), encodedName);
    if (fs.exists(newRegionDir)) {
      throw new IOException("Cannot merge; target file collision at " +
          newRegionDir);
    }
    fs.mkdirs(newRegionDir);

    LOG.info("starting merge of regions: " + a + " and " + b +
      " into new region " + newRegionInfo.toString() +
        " with start key <" + Bytes.toStringBinary(startKey) + "> and end key <" +
        Bytes.toStringBinary(endKey) + ">");
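The exists() check before mkdirs() above is a fail-fast guard against clobbering a directory a concurrent merge may have produced. A standalone sketch of that guard; note that mkdirs() also returns false on failure, which the excerpt ignores:

    import java.io.IOException;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    final class MergeDirs {
      private MergeDirs() {}

      /** Creates dir, refusing to reuse one that already exists. */
      static void prepareFreshDir(FileSystem fs, Path dir) throws IOException {
        if (fs.exists(dir)) {
          throw new IOException("Cannot merge; target file collision at " + dir);
        }
        if (!fs.mkdirs(dir)) {
          throw new IOException("Could not create " + dir);
        }
      }
    }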

      }
      majorCompact = true;
    }
    final Path tableDir = new Path(args[0]);
    final Configuration c = HBaseConfiguration.create();
    final FileSystem fs = FileSystem.get(c);
    final Path logdir = new Path(c.get("hbase.tmp.dir"),
        "hlog" + tableDir.getName()
        + EnvironmentEdgeManager.currentTimeMillis());
    final Path oldLogDir = new Path(c.get("hbase.tmp.dir"),
        HConstants.HREGION_OLDLOGDIR_NAME);
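Above, the compaction tool derives its working directories from a configured base path. The two-argument Path constructor resolves a child against a parent, and Path.getFileSystem goes through the same FileSystem.Cache as FileSystem.get. A sketch (demo.tmp.dir is a hypothetical key):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class WorkDirSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // Resolve a unique working dir under a configured base directory.
        Path base = new Path(conf.get("demo.tmp.dir", "/tmp"));
        Path logDir = new Path(base, "hlog" + System.currentTimeMillis());
        // getFileSystem() is keyed by the path's scheme and authority, so
        // it returns the same cached instance FileSystem.get() would.
        FileSystem fs = logDir.getFileSystem(conf);
        fs.mkdirs(logDir);
        System.out.println("created " + logDir);
      }
    }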

    verifyReadWrite(checkAndPut);
  }

  @Test
  public void testBulkLoad() throws Exception {
    FileSystem fs = TEST_UTIL.getTestFileSystem();
    final Path dir = TEST_UTIL.getDataTestDir("testBulkLoad");
    fs.mkdirs(dir);
    // Make the directory globally writable so the users creating HFiles
    // have write permission on it.
    fs.setPermission(dir, FsPermission.valueOf("-rwxrwxrwx"));

    PrivilegedExceptionAction bulkLoadAction = new PrivilegedExceptionAction() {
      public Object run() throws Exception {
        int numRows = 3;
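The test above opens the staging directory to everyone so that HFiles written by other users can land in it. FsPermission has several equivalent spellings; a small sketch (makeWorldWritable is a hypothetical helper):

    import java.io.IOException;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.fs.permission.FsAction;
    import org.apache.hadoop.fs.permission.FsPermission;

    final class Perms {
      private Perms() {}

      static void makeWorldWritable(FileSystem fs, Path dir) throws IOException {
        // Two equivalent spellings of rwxrwxrwx:
        FsPermission fromString = FsPermission.valueOf("-rwxrwxrwx"); // ls-style
        FsPermission fromActions =
            new FsPermission(FsAction.ALL, FsAction.ALL, FsAction.ALL);
        assert fromString.equals(fromActions);
        fs.setPermission(dir, fromString);
      }
    }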

        }
    }

    private void createTempDir(Cluster cluster, Path coordPath) throws FalconException {
        try {
            FileSystem fs = coordPath.getFileSystem(ClusterHelper.getConfiguration(cluster));
            Path tempDir = new Path(coordPath, "../../logs");
            fs.mkdirs(tempDir);
            fs.setPermission(tempDir, new FsPermission((short) 511)); // 511 == 0777 octal, rwxrwxrwx
        } catch (Exception e) {
            throw new FalconException("Unable to create temp dir in " + coordPath, e);
        }
    }
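The (short) 511 above is easier to audit in octal: 511 decimal is 0777, i.e. rwxrwxrwx. A small sketch of the equivalence (OctalPerms is a made-up name):

    import org.apache.hadoop.fs.permission.FsPermission;

    final class OctalPerms {
      private OctalPerms() {}

      // 511 decimal and 0777 octal are the same bit pattern: rwxrwxrwx.
      static final FsPermission WORLD_ALL = new FsPermission((short) 0777);

      public static void main(String[] args) {
        System.out.println(WORLD_ALL.equals(new FsPermission((short) 511))); // true
      }
    }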

    assertEquals(1, servingRegions.size());
    HRegion region = servingRegions.get(0);

    // get the parent RS and monitor
    HRegionServer hrs = UTIL.getRSForFirstRegionInTable(TABLE_NAME);
    FileSystem fs = hrs.getFileSystem();

    // put some data on the region
    LOG.debug("-------Loading table");
    UTIL.loadRegion(region, TEST_FAM);


  public void testCleaningRace() throws Exception {
    final long TEST_TIME = 20 * 1000;

    Configuration conf = UTIL.getMiniHBaseCluster().getMaster().getConfiguration();
    Path rootDir = UTIL.getDataTestDir("testCleaningRace");
    FileSystem fs = UTIL.getTestFileSystem();

    Path archiveDir = new Path(rootDir, HConstants.HFILE_ARCHIVE_DIRECTORY);
    Path regionDir = new Path("table", "abcdef");
    Path familyDir = new Path(regionDir, "cf");

    Path sourceRegionDir = new Path(rootDir, regionDir);
    fs.mkdirs(sourceRegionDir);

    Stoppable stoppable = new StoppableImplementation();

    // The cleaner should be looping without long pauses to reproduce the race condition.
    HFileCleaner cleaner = new HFileCleaner(1, stoppable, conf, fs, archiveDir);
    try {
      cleaner.start();

      // Keep creating/archiving new files while the cleaner is running in the other thread
      long startTime = System.currentTimeMillis();
      for (long fid = 0; (System.currentTimeMillis() - startTime) < TEST_TIME; ++fid) {
        Path file = new Path(familyDir, String.valueOf(fid));
        Path sourceFile = new Path(rootDir, file);
        Path archiveFile = new Path(archiveDir, file);

        fs.createNewFile(sourceFile);

        try {
          // Try to archive the file
          HFileArchiver.archiveRegion(fs, rootDir,
              sourceRegionDir.getParent(), sourceRegionDir);

          // The archiver succeeded; the file is no longer in the original
          // location, but it is in the archive location.
          LOG.debug("hfile=" + fid + " should be in the archive");
          assertTrue(fs.exists(archiveFile));
          assertFalse(fs.exists(sourceFile));
        } catch (IOException e) {
          // The archiver was unable to archive the file, probably due to the
          // HBASE-7643 race condition. In this case the file should not be
          // archived and should still be in the original location.
          LOG.debug("hfile=" + fid + " should be in the source location");
          assertFalse(fs.exists(archiveFile));
          assertTrue(fs.exists(sourceFile));

          // Avoid picking this file up again on the next iteration
          fs.delete(sourceFile, false);
        }
      }
    } finally {
      stoppable.stop("test end");
      cleaner.join();
      fs.delete(rootDir, true);
    }
  }
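The loop above leans on two FileSystem contracts: createNewFile returns false rather than throwing when the file already exists, and delete takes a recursive flag. A minimal sketch of using createNewFile as a marker file (the path is made up):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class MarkerSketch {
      public static void main(String[] args) throws Exception {
        FileSystem fs = FileSystem.get(new Configuration());
        Path marker = new Path("/tmp/demo-marker");

        // Create-if-absent: returns false when the file already exists.
        if (fs.createNewFile(marker)) {
          try {
            System.out.println("created the marker, doing the work");
          } finally {
            fs.delete(marker, false); // false = non-recursive delete
          }
        } else {
          System.out.println("someone else holds the marker");
        }
      }
    }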
