Examples of HFileCleaner


Examples of org.apache.hadoop.hbase.master.cleaner.HFileCleaner

    toCleanup.add(tableDir);

    Configuration conf = UTIL.getConfiguration();
    // set up the delegate
    Stoppable stop = new StoppableImplementation();
    HFileCleaner cleaner = setupAndCreateCleaner(conf, fs, archiveDir, stop);
    List<BaseHFileCleanerDelegate> cleaners = turnOnArchiving(STRING_TABLE_NAME, cleaner);
    final LongTermArchivingHFileCleaner delegate = (LongTermArchivingHFileCleaner) cleaners.get(0);

    // create the region
    HColumnDescriptor hcd = new HColumnDescriptor(TEST_FAM);
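The StoppableImplementation used above is a small HBase test utility. Here is a minimal sketch of an equivalent Stoppable, assuming only the org.apache.hadoop.hbase.Stoppable interface (SimpleStoppable is a hypothetical name, not the HBase class):

    import org.apache.hadoop.hbase.Stoppable;

    /** Flag-based Stoppable for driving a cleaner chore in tests (sketch). */
    public class SimpleStoppable implements Stoppable {
      private volatile boolean stopped = false;

      @Override
      public void stop(String why) {
        this.stopped = true; // remember that a stop was requested
      }

      @Override
      public boolean isStopped() {
        return this.stopped;
      }
    }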

Examples of org.apache.hadoop.hbase.master.cleaner.HFileCleaner

    toCleanup.add(tableDir);
    toCleanup.add(otherTableDir);
    Configuration conf = UTIL.getConfiguration();
    // set up the delegate
    Stoppable stop = new StoppableImplementation();
    HFileCleaner cleaner = setupAndCreateCleaner(conf, fs, archiveDir, stop);
    List<BaseHFileCleanerDelegate> cleaners = turnOnArchiving(STRING_TABLE_NAME, cleaner);
    final LongTermArchivingHFileCleaner delegate = (LongTermArchivingHFileCleaner) cleaners.get(0);

    // create the region
    HColumnDescriptor hcd = new HColumnDescriptor(TEST_FAM);
    HRegion region = UTIL.createTestRegion(STRING_TABLE_NAME, hcd);
    loadFlushAndCompact(region, TEST_FAM);

    // create another table that we don't archive
    hcd = new HColumnDescriptor(TEST_FAM);
    HRegion otherRegion = UTIL.createTestRegion(otherTable, hcd);
    loadFlushAndCompact(otherRegion, TEST_FAM);

    // get the current hfiles in the archive directory
    List<Path> files = getAllFiles(fs, archiveDir);
    if (files == null) {
      FSUtils.logFileSystemState(fs, archiveDir, LOG);
      throw new RuntimeException("Didn't archive any files!");
    }

    // make sure we have files from both tables
    int initialCountForPrimary = 0;
    int initialCountForOtherTable = 0;
    for (Path file : files) {
      String tableName = file.getParent().getParent().getParent().getName();
      // check to which table this file belongs
      if (tableName.equals(otherTable)) initialCountForOtherTable++;
      else if (tableName.equals(STRING_TABLE_NAME)) initialCountForPrimary++;
    }

    assertTrue("Didn't archive files for:" + STRING_TABLE_NAME, initialCountForPrimary > 0);
    assertTrue("Didn't archive files for:" + otherTable, initialCountForOtherTable > 0);

    // Run the cleaner, which must check each directory and file under 'otherTable'
    // (all of which should be deleted) and each file under the primary table
    // (which should be retained).
    CountDownLatch finished = setupCleanerWatching(delegate, cleaners, files.size() + 3);
    // run the cleaner
    cleaner.start();
    // wait for the cleaner to check all the files
    finished.await();
    // stop the cleaner
    stop.stop("");
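getAllFiles is a private helper of the same test class and is not shown here; the following is a hypothetical sketch, assuming it recursively collects every file beneath a directory and returns null when nothing is found (java.util and org.apache.hadoop.fs imports assumed):

    private static List<Path> getAllFiles(FileSystem fs, Path dir) throws IOException {
      FileStatus[] entries = fs.listStatus(dir);
      if (entries == null || entries.length == 0) return null;
      List<Path> files = new ArrayList<Path>();
      for (FileStatus entry : entries) {
        if (entry.isDir()) {
          // descend through the table/region/family levels of the archive
          List<Path> children = getAllFiles(fs, entry.getPath());
          if (children != null) files.addAll(children);
        } else {
          files.add(entry.getPath());
        }
      }
      return files.isEmpty() ? null : files;
    }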

Examples of org.apache.hadoop.hbase.master.cleaner.HFileCleaner

  private HFileCleaner setupAndCreateCleaner(Configuration conf, FileSystem fs, Path archiveDir,
      Stoppable stop) {
    conf.setStrings(HFileCleaner.MASTER_HFILE_CLEANER_PLUGINS,
      LongTermArchivingHFileCleaner.class.getCanonicalName());
    return new HFileCleaner(1000, stop, conf, fs, archiveDir);
  }
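HFileCleaner.MASTER_HFILE_CLEANER_PLUGINS ("hbase.master.hfilecleaner.plugins") takes a comma-separated list, so several delegates can be chained; a file is deleted only if every delegate marks it deletable. A sketch that installs the stock TTL delegate alongside the archiving one:

    // The cleaner chore consults each configured delegate before deleting a file.
    conf.setStrings(HFileCleaner.MASTER_HFILE_CLEANER_PLUGINS,
      LongTermArchivingHFileCleaner.class.getCanonicalName(),
      TimeToLiveHFileCleaner.class.getCanonicalName());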

Examples of org.apache.hadoop.hbase.master.cleaner.HFileCleaner

    fs.mkdirs(sourceRegionDir);

    Stoppable stoppable = new StoppableImplementation();

    // The cleaner should be looping without long pauses to reproduce the race condition.
    HFileCleaner cleaner = new HFileCleaner(1, stoppable, conf, fs, archiveDir);
    try {
      cleaner.start();

      // Keep creating/archiving new files while the cleaner is running in the other thread
      long startTime = System.currentTimeMillis();
      for (long fid = 0; (System.currentTimeMillis() - startTime) < TEST_TIME; ++fid) {
        Path file = new Path(familyDir, String.valueOf(fid));
        Path sourceFile = new Path(rootDir, file);
        Path archiveFile = new Path(archiveDir, file);

        fs.createNewFile(sourceFile);

        try {
          // Try to archive the file
          HFileArchiver.archiveRegion(fs, rootDir,
              sourceRegionDir.getParent(), sourceRegionDir);

          // The archiver succeeded: the file is no longer in the original
          // location, but it is present in the archive location.
          LOG.debug("hfile=" + fid + " should be in the archive");
          assertTrue(fs.exists(archiveFile));
          assertFalse(fs.exists(sourceFile));
        } catch (IOException e) {
          // The archiver was unable to archive the file, probably due to the
          // HBASE-7643 race condition. In this case the file should not be
          // archived and should still be in the original location.
          LOG.debug("hfile=" + fid + " should be in the source location");
          assertFalse(fs.exists(archiveFile));
          assertTrue(fs.exists(sourceFile));

          // Avoid picking this file up again on the next iteration
          fs.delete(sourceFile, false);
        }
      }
    } finally {
      stoppable.stop("test end");
      cleaner.join();
      fs.delete(rootDir, true);
    }
  }
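The paths and TEST_TIME above come from the test's setup, which this snippet omits. A hypothetical reconstruction, with names and values that are assumptions rather than the original code:

    // Mirror the table/region/family layout under both the root and archive dirs.
    Configuration conf = UTIL.getConfiguration();
    FileSystem fs = UTIL.getTestFileSystem();
    Path rootDir = UTIL.getDataTestDir("archiver-race");
    Path archiveDir = new Path(rootDir, HConstants.HFILE_ARCHIVE_DIRECTORY);
    Path sourceRegionDir = new Path(rootDir, "table/region");
    Path familyDir = new Path("table/region/family");
    long TEST_TIME = 20 * 1000; // drive the create/archive loop for ~20 seconds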

Examples of org.apache.hadoop.hbase.master.cleaner.HFileCleaner

        getMasterFileSystem().getOldLogDir());
    Threads.setDaemonThreadRunning(logCleaner.getThread(), n + ".oldLogCleaner");

    // start the hfile archive cleaner thread
    Path archiveDir = HFileArchiveUtil.getArchivePath(conf);
    this.hfileCleaner = new HFileCleaner(cleanerInterval, this, conf, getMasterFileSystem()
        .getFileSystem(), archiveDir);
    Threads.setDaemonThreadRunning(hfileCleaner.getThread(), n + ".archivedHFileCleaner");

    // Start the health checker
    if (this.healthCheckChore != null) {
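cleanerInterval is read from configuration a few lines earlier in HMaster; a sketch of that lookup (the key is real, the default may vary across versions):

    // Controls how often both the log cleaner and hfile cleaner chores run.
    int cleanerInterval = conf.getInt("hbase.master.cleaner.interval", 60 * 1000);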

Examples of org.apache.hadoop.hbase.master.cleaner.HFileCleaner

    Threads.setDaemonThreadRunning(logCleaner.getThread(),
      getServerName().toShortString() + ".oldLogCleaner");

    // start the hfile archive cleaner thread
    Path archiveDir = HFileArchiveUtil.getArchivePath(conf);
    this.hfileCleaner = new HFileCleaner(cleanerInterval, this, conf, getMasterFileSystem()
        .getFileSystem(), archiveDir);
    Threads.setDaemonThreadRunning(hfileCleaner.getThread(),
      getServerName().toShortString() + ".archivedHFileCleaner");

    serviceStarted = true;
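The matching teardown interrupts the chore threads so they exit promptly; a sketch assuming it mirrors HMaster's stopServiceThreads():

    // Called during master shutdown; each chore notices the interrupt and stops.
    if (this.hfileCleaner != null) this.hfileCleaner.interrupt();
    if (this.logCleaner != null) this.logCleaner.interrupt();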
