Examples of LoadTestTool
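
These snippets are collected from HBase unit and integration tests that drive org.apache.hadoop.hbase.util.LoadTestTool programmatically rather than from the command line. For orientation, a minimal standalone sketch of the same pattern is shown below; it assumes a reachable cluster (hbase-site.xml on the classpath), and the table name, key count, and flag values are illustrative only, not taken from any of the examples' source files.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.util.LoadTestTool;

    public class LoadTestToolSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        LoadTestTool loadTool = new LoadTestTool();
        loadTool.setConf(conf);

        // Create (or verify) the target table and column family only.
        int ret = loadTool.run(new String[] { "-tn", "load_test_table", "-init_only" });
        if (ret != 0) {
          throw new RuntimeException("LoadTestTool init failed with error code " + ret);
        }

        // Write some keys, then read them back for verification. The flag formats
        // mirror the snippets below; exact semantics can vary between HBase versions.
        ret = loadTool.run(new String[] { "-tn", "load_test_table", "-write", "1:10:10",
            "-num_keys", "10000", "-skip_init" });
        if (ret != 0) {
          throw new RuntimeException("Load failed with error code " + ret);
        }
        ret = loadTool.run(new String[] { "-tn", "load_test_table", "-read", "100:10",
            "-num_keys", "10000", "-skip_init" });
        if (ret != 0) {
          throw new RuntimeException("Verification failed with error code " + ret);
        }
      }
    }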


Examples of org.apache.hadoop.hbase.util.LoadTestTool

    LOG.debug("Initializing/checking cluster has " + numSlavesBase + " servers");
    util.initializeCluster(numSlavesBase);
    LOG.debug("Done initializing/checking cluster");
    cluster = util.getHBaseClusterInterface();
    deleteTableIfNecessary();
    loadTool = new LoadTestTool();
    loadTool.setConf(util.getConfiguration());
    // Initialize load test tool before we start breaking things;
    // LoadTestTool init, even when it is a no-op, is very fragile.
    int ret = loadTool.run(new String[] { "-tn", getTablename(), "-init_only" });
    Assert.assertEquals("Failed to initialize LoadTestTool", 0, ret);

Examples of org.apache.hadoop.hbase.util.LoadTestTool

    // Make sure there are three servers.
    util.initializeCluster(3);

    // Set up the load test tool.
    loadTool = new LoadTestTool();
    loadTool.setConf(util.getConfiguration());

    // Create executor with enough threads to restart rs's,
    // run scans, puts, admin ops and load test tool.
    executorService = Executors.newFixedThreadPool(8);
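
The executor here is sized so the load test tool can run concurrently with region-server restarts and client operations. A sketch of how the load run might be handed to that pool (table name and flag values are placeholders; assumes java.util.concurrent.Callable and Future are imported and org.junit.Assert is available):

    // Hypothetical: run the ingest on the shared pool so it overlaps with the
    // restart/scan/put tasks submitted to the same executor.
    Future<Integer> loadFuture = executorService.submit(new Callable<Integer>() {
      @Override
      public Integer call() throws Exception {
        return loadTool.run(new String[] { "-tn", "load_test_table", "-write", "1:10:10",
            "-num_keys", "100000" });
      }
    });
    // Later, once the concurrent activity is done:
    Assert.assertEquals("Load failed", 0, (int) loadFuture.get());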

Examples of org.apache.hadoop.hbase.util.LoadTestTool

    LOG.debug("Initializing/checking cluster has " + SERVER_COUNT + " servers");
    util.initializeCluster(SERVER_COUNT);
    LOG.debug("Done initializing/checking cluster");
    cluster = util.getHBaseClusterInterface();
    deleteTableIfNecessary();
    loadTool = new LoadTestTool();
    loadTool.setConf(util.getConfiguration());
    // Initialize load test tool before we start breaking things;
    // LoadTestTool init, even when it is a no-op, is very fragile.
    initTable();
  }
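
Here the -init_only invocation is factored out into an initTable() helper rather than inlined. Its body is not shown in this snippet, but judging from the other setUp variants on this page it presumably amounts to something like:

    // Hypothetical body of initTable(), mirroring the inline variants above.
    protected void initTable() throws Exception {
      int ret = loadTool.run(new String[] { "-tn", getTablename(), "-init_only" });
      Assert.assertEquals("Failed to initialize LoadTestTool", 0, ret);
    }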

Examples of org.apache.hadoop.hbase.util.LoadTestTool

    LOG.info("Initializing cluster with " + numSlavesBase + " servers");
    util.initializeCluster(numSlavesBase);
    LOG.info("Done initializing cluster");
    cluster = util.getHBaseClusterInterface();
    deleteTableIfNecessary();
    loadTool = new LoadTestTool();
    loadTool.setConf(util.getConfiguration());
    // Initialize load test tool before we start breaking things;
    // LoadTestTool init, even when it is a no-op, is very fragile.
    int ret = loadTool.run(new String[] { "-tn", tableName, "-init_only" });
    Assert.assertEquals("Failed to initialize LoadTestTool", 0, ret);

Examples of org.apache.hadoop.hbase.util.LoadTestTool

    LOG.debug("Initializing/checking cluster has " + SERVER_COUNT + " servers");
    util.initializeCluster(SERVER_COUNT);
    LOG.debug("Done initializing/checking cluster");
    cluster = util.getHBaseClusterInterface();
    deleteTableIfNecessary();
    loadTool = new LoadTestTool();
    loadTool.setConf(util.getConfiguration());
    // Initialize load test tool before we start breaking things;
    // LoadTestTool init, even when it is a no-op, is very fragile.
    int ret = loadTool.run(new String[] { "-tn", getTablename(), "-init_only" });
    Assert.assertEquals("Failed to initialize LoadTestTool", 0, ret);

Examples of org.apache.hadoop.hbase.util.LoadTestTool

    LOG.debug("Initializing/checking cluster has " + numSlavesBase + " servers");
    util.initializeCluster(numSlavesBase);
    LOG.debug("Done initializing/checking cluster");
    cluster = util.getHBaseClusterInterface();
    deleteTableIfNecessary();
    loadTool = new LoadTestTool();
    loadTool.setConf(util.getConfiguration());
    // Initialize load test tool before we start breaking things;
    // LoadTestTool init, even when it is a no-op, is very fragile.
    int ret = loadTool.run(new String[] { "-tn", tableName, "-init_only" });
    Assert.assertEquals("Failed to initialize LoadTestTool", 0, ret);

Examples of org.apache.hadoop.hbase.util.LoadTestTool

    // Test plan: write some data, then continuously alter the table and force splits
    // concurrently until we have 5 regions. Verify the data just in case.
    // Every region should contain the same table descriptor.
    // This is not an exact test.
    prepareMiniCluster();
    LoadTestTool loadTool = new LoadTestTool();
    loadTool.setConf(TEST_UTIL.getConfiguration());
    int numKeys = 10000;
    final TableName tableName = TableName.valueOf("testTableReadLock");
    final HBaseAdmin admin = TEST_UTIL.getHBaseAdmin();
    final HTableDescriptor desc = new HTableDescriptor(tableName);
    final byte[] family = Bytes.toBytes("test_cf");
    desc.addFamily(new HColumnDescriptor(family));
    admin.createTable(desc); // create with one region

    // write some data, not much
    int ret = loadTool.run(new String[] { "-tn", tableName.getNameAsString(), "-write",
        String.format("%d:%d:%d", 1, 10, 10), "-num_keys", String.valueOf(numKeys), "-skip_init" });
    if (0 != ret) {
      String errorMsg = "Load failed with error code " + ret;
      LOG.error(errorMsg);
      fail(errorMsg);
    }

    int familyValues = admin.getTableDescriptor(tableName).getFamily(family).getValues().size();
    StoppableImplementation stopper = new StoppableImplementation();

    //alter table every 10 sec
    Chore alterThread = new Chore("Alter Chore", 10000, stopper) {
      @Override
      protected void chore() {
        Random random = new Random();
        try {
          HTableDescriptor htd = admin.getTableDescriptor(tableName);
          String val = String.valueOf(random.nextInt());
          htd.getFamily(family).setValue(val, val);
          desc.getFamily(family).setValue(val, val); // keep the control copy in sync for the asserts below
          admin.modifyTable(tableName, htd);
        } catch (Exception ex) {
          LOG.warn("Caught exception", ex);
          fail(ex.getMessage());
        }
      }
    };

    //split table every 5 sec
    Chore splitThread = new Chore("Split thread", 5000, stopper) {
      @Override
      public void chore() {
        try {
          HRegion region = TEST_UTIL.getSplittableRegion(tableName, -1);
          if (region != null) {
            byte[] regionName = region.getRegionName();
            admin.flush(regionName);
            admin.compact(regionName);
            admin.split(regionName);
          } else {
            LOG.warn("Could not find suitable region for the table.  Possibly the " +
              "region got closed and the attempts got over before " +
              "the region could have got reassigned.");
          }
        } catch (NotServingRegionException nsre) {
          // the region may be in transition
          LOG.warn("Caught exception", nsre);
        } catch (Exception ex) {
          LOG.warn("Caught exception", ex);
          fail(ex.getMessage());
        }
      }
    };

    alterThread.start();
    splitThread.start();
    while (true) {
      List<HRegionInfo> regions = admin.getTableRegions(tableName);
      LOG.info(String.format("Table #regions: %d regions: %s:", regions.size(), regions));
      assertEquals(admin.getTableDescriptor(tableName), desc);
      for (HRegion region : TEST_UTIL.getMiniHBaseCluster().getRegions(tableName)) {
        assertEquals(desc, region.getTableDesc());
      }
      if (regions.size() >= 5) {
        break;
      }
      Threads.sleep(1000);
    }
    stopper.stop("test finished");

    int newFamilyValues = admin.getTableDescriptor(tableName).getFamily(family).getValues().size();
    LOG.info(String.format("Altered the table %d times", newFamilyValues - familyValues));
    assertTrue(newFamilyValues > familyValues); // at least one alter went through

    ret = loadTool.run(new String[] { "-tn", tableName.getNameAsString(), "-read", "100:10",
        "-num_keys", String.valueOf(numKeys), "-skip_init" });
    if (0 != ret) {
      String errorMsg = "Verify failed with error code " + ret;
      LOG.error(errorMsg);
      fail(errorMsg);
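
A note on the LoadTestTool arguments used in this example, with the formats assumed from the tool's usage text (worth double-checking against the HBase version in use):

    // -write <avg_cols_per_key>:<avg_data_size>[:<#threads>] -- so "1:10:10" means
    // one column per key, roughly 10-byte values, written by 10 threads.
    String[] writeArgs = { "-tn", tableName.getNameAsString(), "-write", "1:10:10",
        "-num_keys", String.valueOf(numKeys), "-skip_init" };
    // -read <verify_percent>[:<#threads>] -- so "100:10" verifies 100% of the keys
    // with 10 reader threads; -skip_init reuses the already-created table.
    String[] readArgs = { "-tn", tableName.getNameAsString(), "-read", "100:10",
        "-num_keys", String.valueOf(numKeys), "-skip_init" };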

Examples of org.apache.hadoop.hbase.util.LoadTestTool

    // Test plan: write some data, then continuously alter the table and force splits
    // concurrently until we have 10 regions. Verify the data just in case.
    // Every region should contain the same table descriptor.
    // This is not an exact test.
    prepareMiniCluster();
    LoadTestTool loadTool = new LoadTestTool();
    loadTool.setConf(TEST_UTIL.getConfiguration());
    int numKeys = 10000;
    final byte[] tableName = Bytes.toBytes("testTableReadLock");
    final HBaseAdmin admin = TEST_UTIL.getHBaseAdmin();
    final HTableDescriptor desc = new HTableDescriptor(tableName);
    final byte[] family = Bytes.toBytes("test_cf");
    desc.addFamily(new HColumnDescriptor(family));
    admin.createTable(desc); // create with one region

    // write some data, not much
    int ret = loadTool.run(new String[] { "-tn", Bytes.toString(tableName), "-write",
        String.format("%d:%d:%d", 1, 10, 10), "-num_keys", String.valueOf(numKeys), "-skip_init" });
    if (0 != ret) {
      String errorMsg = "Load failed with error code " + ret;
      LOG.error(errorMsg);
      fail(errorMsg);
    }

    int familyValues = admin.getTableDescriptor(tableName).getFamily(family).getValues().size();
    StoppableImplementation stopper = new StoppableImplementation();

    //alter table every 10 sec
    Chore alterThread = new Chore("Alter Chore", 10000, stopper) {
      @Override
      protected void chore() {
        Random random = new Random();
        try {
          HTableDescriptor htd = admin.getTableDescriptor(tableName);
          String val = String.valueOf(random.nextInt());
          htd.getFamily(family).setValue(val, val);
          desc.getFamily(family).setValue(val, val); // keep the control copy in sync for the asserts below
          admin.modifyTable(tableName, htd);
        } catch (Exception ex) {
          LOG.warn("Caught exception", ex);
          fail(ex.getMessage());
        }
      }
    };

    //split table every 5 sec
    Chore splitThread = new Chore("Split thread", 5000, stopper) {
      @Override
      public void chore() {
        try {
          HRegion region = TEST_UTIL.getSplittableRegion(tableName, -1);
          if (region != null) {
            byte[] regionName = region.getRegionName();
            admin.flush(regionName);
            admin.compact(regionName);
            admin.split(regionName);
          } else {
            LOG.warn("Could not find suitable region for the table.  Possibly the " +
              "region got closed and the attempts got over before " +
              "the region could have got reassigned.");
          }
        } catch (NotServingRegionException nsre) {
          // the region may be in transition
          LOG.warn("Caught exception", nsre);
        } catch (Exception ex) {
          LOG.warn("Caught exception", ex);
          fail(ex.getMessage());
        }
      }
    };

    alterThread.start();
    splitThread.start();
    while (true) {
      List<HRegionInfo> regions = admin.getTableRegions(tableName);
      LOG.info(String.format("Table #regions: %d regions: %s:", regions.size(), regions));
      assertEquals(admin.getTableDescriptor(tableName), desc);
      for (HRegion region : TEST_UTIL.getMiniHBaseCluster().getRegions(tableName)) {
        assertEquals(desc, region.getTableDesc());
      }
      if (regions.size() >= 10) {
        break;
      }
      Threads.sleep(1000);
    }
    stopper.stop("test finished");

    int newFamilyValues = admin.getTableDescriptor(tableName).getFamily(family).getValues().size();
    LOG.info(String.format("Altered the table %d times", newFamilyValues - familyValues));
    assertTrue(newFamilyValues > familyValues); // at least one alter went through

    ret = loadTool.run(new String[] { "-tn", Bytes.toString(tableName), "-read", "100:10",
        "-num_keys", String.valueOf(numKeys), "-skip_init" });
    if (0 != ret) {
      String errorMsg = "Verify failed with error code " + ret;
      LOG.error(errorMsg);
      fail(errorMsg);
