Package org.apache.hadoop.fs

Examples of org.apache.hadoop.fs.FileSystem$Cache$Key
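Every snippet on this page obtains a FileSystem handle (UTIL.getTestFileSystem(), cluster.getFileSystem(), FileSystem.get(conf), Path.getFileSystem(conf), ...); Hadoop serves these handles from an internal cache keyed by FileSystem.Cache.Key, which combines the URI scheme and authority with the calling UserGroupInformation. Below is a minimal sketch of that caching behaviour using only the public FileSystem API; the class name and the file:/// URI are illustrative and do not come from the snippets.

import java.net.URI;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;

public class FsCacheSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();

    // FileSystem.get() consults a cache keyed by FileSystem.Cache.Key
    // (scheme + authority + current user), so repeated calls with the
    // same URI return the same instance.
    FileSystem a = FileSystem.get(URI.create("file:///"), conf);
    FileSystem b = FileSystem.get(URI.create("file:///"), conf);
    System.out.println(a == b);   // true: same cached instance

    // newInstance() deliberately skips the shared cache entry and hands
    // back a separate FileSystem that the caller must close itself.
    FileSystem c = FileSystem.newInstance(URI.create("file:///"), conf);
    System.out.println(a == c);   // false: distinct instance
    c.close();
  }
}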


  public void testCleaningRace() throws Exception {
    final long TEST_TIME = 20 * 1000;

    Configuration conf = UTIL.getMiniHBaseCluster().getMaster().getConfiguration();
    Path rootDir = UTIL.getDataTestDir("testCleaningRace");
    FileSystem fs = UTIL.getTestFileSystem();

    Path archiveDir = new Path(rootDir, HConstants.HFILE_ARCHIVE_DIRECTORY);
    Path regionDir = new Path("table", "abcdef");
    Path familyDir = new Path(regionDir, "cf");

    Path sourceRegionDir = new Path(rootDir, regionDir);
    fs.mkdirs(sourceRegionDir);

    Stoppable stoppable = new StoppableImplementation();

    // The cleaner should be looping without long pauses to reproduce the race condition.
    HFileCleaner cleaner = new HFileCleaner(1, stoppable, conf, fs, archiveDir);
    try {
      cleaner.start();

      // Keep creating/archiving new files while the cleaner is running in the other thread
      long startTime = System.currentTimeMillis();
      for (long fid = 0; (System.currentTimeMillis() - startTime) < TEST_TIME; ++fid) {
        Path file = new Path(familyDir, String.valueOf(fid));
        Path sourceFile = new Path(rootDir, file);
        Path archiveFile = new Path(archiveDir, file);

        fs.createNewFile(sourceFile);

        try {
          // Try to archive the file
          HFileArchiver.archiveRegion(fs, rootDir,
              sourceRegionDir.getParent(), sourceRegionDir);

          // The archiver succeeded: the file is no longer in the original location
          // but is now in the archive location.
          LOG.debug("hfile=" + fid + " should be in the archive");
          assertTrue(fs.exists(archiveFile));
          assertFalse(fs.exists(sourceFile));
        } catch (IOException e) {
          // The archiver was unable to archive the file, probably due to the
          // HBASE-7643 race condition. In this case the file should not be
          // archived and should still be in the original location.
          LOG.debug("hfile=" + fid + " should be in the source location");
          assertFalse(fs.exists(archiveFile));
          assertTrue(fs.exists(sourceFile));

          // Avoid having this file around for the next iteration
          fs.delete(sourceFile, false);
        }
      }
    } finally {
      stoppable.stop("test end");
      cleaner.join();
      fs.delete(rootDir, true);
    }
  }


        new String[] {"hadPreBulkLoadHFile", "hadPostBulkLoadHFile"},
        tableName,
        new Boolean[] {false, false}
    );

    FileSystem fs = util.getTestFileSystem();
    final Path dir = util.getDataTestDir(testName).makeQualified(fs);
    Path familyDir = new Path(dir, Bytes.toString(A));

    createHFile(util.getConfiguration(), fs, new Path(familyDir, Bytes.toString(A)), A, A);

    conf.setInt("dfs.blocksize", 1024 * 1024);
    conf.setInt("dfs.client.read.prefetch.size", 2 * 1024 * 1024);

    testUtil.startMiniDFSCluster(1);
    MiniDFSCluster cluster = testUtil.getDFSCluster();
    FileSystem fs = cluster.getFileSystem();
    assertEquals("hdfs", fs.getUri().getScheme());

    try {
      testLinkReadDuringRename(fs, testUtil.getDefaultRootDirPath());
    } finally {
      testUtil.shutdownMiniCluster();

   * even when the current file gets renamed.
   */
  @Test
  public void testLocalLinkReadDuringRename() throws IOException {
    HBaseTestingUtility testUtil = new HBaseTestingUtility();
    FileSystem fs = testUtil.getTestFileSystem();
    assertEquals("file", fs.getUri().getScheme());
    testLinkReadDuringRename(fs, testUtil.getDataTestDir());
  }

    conf.setInt("dfs.blocksize", 1024 * 1024);
    conf.setInt("dfs.client.read.prefetch.size", 2 * 1024 * 1024);

    testUtil.startMiniDFSCluster(1);
    MiniDFSCluster cluster = testUtil.getDFSCluster();
    FileSystem fs = cluster.getFileSystem();
    assertEquals("hdfs", fs.getUri().getScheme());

    try {
      List<Path> files = new ArrayList<Path>();
      for (int i = 0; i < 3; i++) {
        Path path = new Path(String.format("test-data-%d", i));
        writeSomeData(fs, path, 1 << 20, (byte)i);
        files.add(path);
      }

      FileLink link = new FileLink(files);
      FSDataInputStream in = link.open(fs);
      try {
        byte[] data = new byte[8192];
        int n;

        // Switch to file 1
        n = in.read(data);
        dataVerify(data, n, (byte)0);
        fs.delete(files.get(0));
        skipBuffer(in, (byte)0);

        // Switch to file 2
        n = in.read(data);
        dataVerify(data, n, (byte)1);
        fs.delete(files.get(1));
        skipBuffer(in, (byte)1);

        // Switch to file 3
        n = in.read(data);
        dataVerify(data, n, (byte)2);
        fs.delete(files.get(2));
        skipBuffer(in, (byte)2);

        // No more files available
        try {
          n = in.read(data);

    // make sure we have the snapshot
    List<SnapshotDescription> snapshots = SnapshotTestingUtils.assertOneSnapshotThatMatches(admin,
      snapshot, TABLE_NAME);

    // make sure it's a valid snapshot
    FileSystem fs = UTIL.getHBaseCluster().getMaster().getMasterFileSystem().getFileSystem();
    Path rootDir = UTIL.getHBaseCluster().getMaster().getMasterFileSystem().getRootDir();
    LOG.debug("FS state after snapshot:");
    FSUtils.logFileSystemState(UTIL.getTestFileSystem(),
      FSUtils.getRootDir(UTIL.getConfiguration()), LOG);

    byte [] enabledTable = Bytes.toBytes("enabledTable");
    HTableDescriptor htdEnabled = new HTableDescriptor(enabledTable);
    htdEnabled.addFamily(new HColumnDescriptor(FAMILY));

    FileSystem filesystem = FileSystem.get(conf);
    Path rootdir = filesystem.makeQualified(
        new Path(conf.get(HConstants.HBASE_DIR)));
    // Write the .tableinfo
    FSTableDescriptors.createTableDescriptor(filesystem, rootdir, htdEnabled);

    HRegionInfo hriEnabled = new HRegionInfo(htdEnabled.getName(), null, null);

    };

    byte [] enabledTable = Bytes.toBytes("enabledTable");
    HTableDescriptor htdEnabled = new HTableDescriptor(enabledTable);
    htdEnabled.addFamily(new HColumnDescriptor(FAMILY));
    FileSystem filesystem = FileSystem.get(conf);
    Path rootdir = filesystem.makeQualified(
           new Path(conf.get(HConstants.HBASE_DIR)));
    // Write the .tableinfo
    FSTableDescriptors.createTableDescriptor(filesystem, rootdir, htdEnabled);
    HRegionInfo hriEnabled = new HRegionInfo(htdEnabled.getName(),
        null, null);

        }

        if (regionInfoOnly) {
          LOG.info("deleting hdfs .regioninfo data: " + hri.toString() + hsa.toString());
          Path rootDir = new Path(conf.get(HConstants.HBASE_DIR));
          FileSystem fs = rootDir.getFileSystem(conf);
          Path p = new Path(rootDir + "/" + htd.getNameAsString(), hri.getEncodedName());
          Path hriPath = new Path(p, HRegion.REGIONINFO_FILE);
          fs.delete(hriPath, true);
        }

        if (hdfs) {
          LOG.info("deleting hdfs data: " + hri.toString() + hsa.toString());
          Path rootDir = new Path(conf.get(HConstants.HBASE_DIR));
          FileSystem fs = rootDir.getFileSystem(conf);
          Path p = new Path(rootDir + "/" + htd.getNameAsString(), hri.getEncodedName());
          HBaseFsck.debugLsr(conf, p);
          boolean success = fs.delete(p, true);
          LOG.info("Deleted " + p + " sucessfully? " + success);
          HBaseFsck.debugLsr(conf, p);
        }

        if (metaRow) {

  }

  @Test
  public void testHbckFixOrphanTable() throws Exception {
    String table = "tableInfo";
    FileSystem fs = null;
    Path tableinfo = null;
    try {
      setupTable(table);
      HBaseAdmin admin = TEST_UTIL.getHBaseAdmin();

      Path hbaseTableDir = new Path(conf.get(HConstants.HBASE_DIR) + "/" + table );
      fs = hbaseTableDir.getFileSystem(conf);
      FileStatus status = FSTableDescriptors.getTableInfoPath(fs, hbaseTableDir);
      tableinfo = status.getPath();
      fs.rename(tableinfo, new Path("/.tableinfo"));

      // Run hbck without fixing; it should report an error because .tableinfo is missing.
      HBaseFsck hbck = doFsck(conf, false);
      assertErrors(hbck, new ERROR_CODE[] { ERROR_CODE.NO_TABLEINFO_FILE });

      // fix OrphanTable with default .tableinfo (htd not yet cached on master)
      hbck = doFsck(conf, true);
      assertNoErrors(hbck);
      status = null;
      status = FSTableDescriptors.getTableInfoPath(fs, hbaseTableDir);
      assertNotNull(status);

      HTableDescriptor htd = admin.getTableDescriptor(table.getBytes());
      htd.setValue("NOT_DEFAULT", "true");
      admin.disableTable(table);
      admin.modifyTable(table.getBytes(), htd);
      admin.enableTable(table);
      fs.delete(status.getPath(), true);

      // fix OrphanTable with cache
      htd = admin.getTableDescriptor(table.getBytes()); // warms up cached htd on master
      hbck = doFsck(conf, true);
      assertNoErrors(hbck);
      status = null;
      status = FSTableDescriptors.getTableInfoPath(fs, hbaseTableDir);
      assertNotNull(status);
      htd = admin.getTableDescriptor(table.getBytes());
      assertEquals(htd.getValue("NOT_DEFAULT"), "true");
    } finally {
      fs.rename(new Path("/.tableinfo"), tableinfo);
      deleteTable(table);
    }
  }
