Package org.apache.hadoop.hbase

Examples of org.apache.hadoop.hbase.TableName

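TableName is the immutable, namespace-aware identifier that the HBase client API uses wherever a table must be named. The snippets below, drawn from the HBase test suite, mostly build a unique TableName from the running test's method name. As a minimal sketch of the class itself (the names here are illustrative, not taken from the snippets):

  // Parse a fully qualified name; "ns" is an illustrative namespace.
  TableName qualified = TableName.valueOf("ns:orders");
  String namespace = qualified.getNamespaceAsString(); // "ns"
  String qualifier = qualified.getQualifierAsString(); // "orders"

  // Without a namespace prefix, the table lives in the "default" namespace.
  TableName plain = TableName.valueOf("orders");
  String ns = plain.getNamespaceAsString(); // "default"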

  private void durabilityTest(String method, Durability tableDurability,
      Durability mutationDurability, long timeout, boolean expectAppend, final boolean expectSync,
      final boolean expectSyncFromLogSyncer) throws Exception {
    method = method + "_" + tableDurability.name() + "_" + mutationDurability.name();
    TableName tableName = TableName.valueOf(method);
    byte[] family = Bytes.toBytes("family");
    Path logDir = new Path(new Path(DIR + method), "log");
    HLog hlog = HLogFactory.createHLog(fs, logDir, UUID.randomUUID().toString(), conf);
    final HLog log = spy(hlog);
    this.region = initHRegion(tableName.getName(), HConstants.EMPTY_START_ROW,
        HConstants.EMPTY_END_ROW, method, conf, false, tableDurability, log,
        new byte[][] { family });

    Put put = new Put(Bytes.toBytes("r1"));
    put.add(family, Bytes.toBytes("q1"), Bytes.toBytes("v1"));
    // ... (snippet truncated)
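The durability test above encodes the table-level and mutation-level Durability settings into the TableName itself, so every combination gets an isolated region and WAL directory. Durability can also be set directly on a mutation; a minimal sketch, reusing the family bytes from the snippet:

    Put put = new Put(Bytes.toBytes("r1"));
    put.add(family, Bytes.toBytes("q1"), Bytes.toBytes("v1"));
    // Override the table's default durability for this single mutation.
    put.setDurability(Durability.SYNC_WAL);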


    table.close();
  }

  @Test
  public void testCloneSnapshotOfCloned() throws IOException, InterruptedException {
    TableName clonedTableName =
        TableName.valueOf("clonedtb-" + System.currentTimeMillis());
    admin.cloneSnapshot(snapshotName0, clonedTableName);
    SnapshotTestingUtils.verifyRowCount(TEST_UTIL, clonedTableName, snapshot0Rows);
    admin.disableTable(clonedTableName);
    admin.snapshot(snapshotName2, clonedTableName);
    // ... (snippet truncated)
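Both snapshot() and cloneSnapshot() take the target table as a TableName, which is why a clone can itself be snapshotted and re-cloned, as the test above does. A minimal round trip with illustrative names (the admin handle and sourceTableName, an existing populated table, are assumed):

    byte[] snapshotName = Bytes.toBytes("snap-" + System.currentTimeMillis());
    admin.snapshot(snapshotName, sourceTableName);   // sourceTableName: an existing TableName (assumed)
    TableName clone = TableName.valueOf("clone-" + System.currentTimeMillis());
    admin.cloneSnapshot(snapshotName, clone);        // materialize the snapshot under a new name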

  }

  @Test
  public void testCorruptedSnapshot() throws IOException, InterruptedException {
    SnapshotTestingUtils.corruptSnapshot(TEST_UTIL, Bytes.toString(snapshotName0));
    TableName cloneName = TableName.valueOf("corruptedClone-" + System.currentTimeMillis());
    try {
      admin.cloneSnapshot(snapshotName0, cloneName);
      fail("Expected CorruptedSnapshotException, but cloneSnapshot() succeeded");
    } catch (CorruptedSnapshotException e) {
      // Got the expected corruption exception.
      // ... (snippet truncated)

     * cause a minor compaction.
     */
    TEST_UTIL.getConfiguration().setInt("hbase.hstore.compaction.min", 3);

    String tableName = "testAdvancedConfigOverride";
    TableName TABLE = TableName.valueOf(tableName);
    HTable hTable = TEST_UTIL.createTable(TABLE, FAMILY, 10);
    HBaseAdmin admin = new HBaseAdmin(TEST_UTIL.getConfiguration());
    HConnection connection = HConnectionManager.getConnection(TEST_UTIL
        .getConfiguration());

    // Create 3 store files.
    byte[] row = Bytes.toBytes(random.nextInt());
    performMultiplePutAndFlush(admin, hTable, row, FAMILY, 3, 100);

    // Verify we have multiple store files.
    HRegionLocation loc = hTable.getRegionLocation(row, true);
    byte[] regionName = loc.getRegionInfo().getRegionName();
    AdminProtos.AdminService.BlockingInterface server =
      connection.getAdmin(loc.getServerName());
    assertTrue(ProtobufUtil.getStoreFiles(
      server, regionName, FAMILY).size() > 1);

    // Issue a compaction request
    admin.compact(TABLE.getName());

    // Poll for up to ~10 seconds (250 iterations x 40 ms) for the compaction to finish.
    for (int i = 0; i < 10 * 1000 / 40; ++i) {
      // The store file count should drop once the compaction completes.
      loc = hTable.getRegionLocation(row, true);
      if (!loc.getRegionInfo().isOffline()) {
        regionName = loc.getRegionInfo().getRegionName();
        server = connection.getAdmin(loc.getServerName());
        if (ProtobufUtil.getStoreFiles(
            server, regionName, FAMILY).size() <= 1) {
          break;
        }
      }
      Thread.sleep(40);
    }
    // verify the compactions took place and that we didn't just time out
    assertTrue(ProtobufUtil.getStoreFiles(
      server, regionName, FAMILY).size() <= 1);

    // change the compaction.min config option for this table to 5
    LOG.info("hbase.hstore.compaction.min should now be 5");
    HTableDescriptor htd = new HTableDescriptor(hTable.getTableDescriptor());
    htd.setValue("hbase.hstore.compaction.min", String.valueOf(5));
    admin.modifyTable(TABLE, htd);
    Pair<Integer, Integer> st;
    while (null != (st = admin.getAlterStatus(TABLE)) && st.getFirst() > 0) {
      LOG.debug(st.getFirst() + " regions left to update");
      Thread.sleep(40);
    }
    LOG.info("alter status finished");

    // Create 3 more store files.
    performMultiplePutAndFlush(admin, hTable, row, FAMILY, 3, 10);

    // Issue a compaction request
    admin.compact(TABLE.getName());

    // This time no compaction should run: the store file count stays below the new minimum of 5.
    Thread.sleep(10 * 1000);
    loc = hTable.getRegionLocation(row, true);
    regionName = loc.getRegionInfo().getRegionName();
    server = connection.getAdmin(loc.getServerName());
    int sfCount = ProtobufUtil.getStoreFiles(
      server, regionName, FAMILY).size();
    assertTrue(sfCount > 1);

    // Change an individual column family's config option to 2 via an online schema update.
    LOG.info("hbase.hstore.compaction.min should now be 2");
    HColumnDescriptor hcd = new HColumnDescriptor(htd.getFamily(FAMILY));
    hcd.setValue("hbase.hstore.compaction.min", String.valueOf(2));
    htd.addFamily(hcd);
    admin.modifyTable(TABLE, htd);
    while (null != (st = admin.getAlterStatus(TABLE)) && st.getFirst() > 0) {
      LOG.debug(st.getFirst() + " regions left to update");
      Thread.sleep(40);
    }
    LOG.info("alter status finished");

    // Issue a compaction request
    admin.compact(TABLE.getName());

    // Poll again for the compaction to finish.
    for (int i = 0; i < 10 * 1000 / 40; ++i) {
      loc = hTable.getRegionLocation(row, true);
      regionName = loc.getRegionInfo().getRegionName();
      // ... (snippet truncated)
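modifyTable is asynchronous, which is why the test polls getAlterStatus after each schema change. The same wait, factored into a hypothetical helper (the 40 ms interval mirrors the test; real code would also want an overall timeout):

  private void waitForAlter(HBaseAdmin admin, TableName table)
      throws IOException, InterruptedException {
    Pair<Integer, Integer> st;
    // getFirst() is the number of regions still to be updated; getSecond() is the total.
    while (null != (st = admin.getAlterStatus(table)) && st.getFirst() > 0) {
      Thread.sleep(40);
    }
  }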

    private boolean resubmitFailedPut(PutStatus failedPutStatus,
        HRegionLocation oldLoc) throws IOException {
      Put failedPut = failedPutStatus.getPut();
      // The current Put failed; get the table name it was destined for.
      TableName tableName = failedPutStatus.getRegionInfo().getTable();
      // Decrease the retry count
      int retryCount = failedPutStatus.getRetryCount() - 1;

      if (retryCount <= 0) {
        // Out of retries: update the failure counter and stop retrying.
        // ... (snippet truncated)
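Because the failed Put's status carries its HRegionInfo, the owning TableName is recovered with getTable() rather than by parsing the region name; both the raw and printable forms are then at hand:

      TableName tableName = failedPutStatus.getRegionInfo().getTable();
      byte[] nameBytes = tableName.getName();          // raw byte[] form
      String nameString = tableName.getNameAsString(); // printable form, e.g. for log messages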

  }

  @Test
  public void testSkipRecoveredEditsReplay() throws Exception {
    String method = "testSkipRecoveredEditsReplay";
    TableName tableName = TableName.valueOf(method);
    byte[] family = Bytes.toBytes("family");
    this.region = initHRegion(tableName, method, conf, family);
    try {
      Path regiondir = region.getRegionFileSystem().getRegionDir();
      FileSystem fs = region.getRegionFileSystem().getFileSystem();
      // ... (snippet truncated)

  }

  @Test
  public void testSkipRecoveredEditsReplaySomeIgnored() throws Exception {
    String method = "testSkipRecoveredEditsReplaySomeIgnored";
    TableName tableName = TableName.valueOf(method);
    byte[] family = Bytes.toBytes("family");
    this.region = initHRegion(tableName, method, conf, family);
    try {
      Path regiondir = region.getRegionFileSystem().getRegionDir();
      FileSystem fs = region.getRegionFileSystem().getFileSystem();
      // ... (snippet truncated)

  }

  @Test
  public void testRecoveredEditsReplayCompaction() throws Exception {
    String method = name.getMethodName();
    TableName tableName = TableName.valueOf(method);
    byte[] family = Bytes.toBytes("family");
    this.region = initHRegion(tableName, method, conf, family);
    try {
      Path regiondir = region.getRegionFileSystem().getRegionDir();
      FileSystem fs = region.getRegionFileSystem().getFileSystem();
      // ... (snippet truncated)
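testRecoveredEditsReplayCompaction reads its table name from name.getMethodName(), where name is a JUnit 4 TestName rule; each test method therefore gets its own table and region directory. A minimal sketch of the pattern (class and method names illustrative):

  import org.apache.hadoop.hbase.TableName;
  import org.junit.Rule;
  import org.junit.Test;
  import org.junit.rules.TestName;

  public class SomeRegionTest {
    @Rule
    public TestName name = new TestName(); // exposes the currently running test method's name

    @Test
    public void testSomething() {
      // One table per test method, so tests never collide on table state.
      TableName tableName = TableName.valueOf(name.getMethodName());
    }
  }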

    }
  }

  @Test
  public void testGetWhileRegionClose() throws IOException {
    TableName tableName = TableName.valueOf(name.getMethodName());
    Configuration hc = initSplit();
    int numRows = 100;
    byte[][] families = { fam1, fam2, fam3 };

    // Setting up region
    // ... (snippet truncated)

    }
  }

  @Test
  public void testCheckAndPut_wrongRowInPut() throws IOException {
    TableName tableName = TableName.valueOf(name.getMethodName());
    this.region = initHRegion(tableName, this.getName(), conf, COLUMNS);
    try {
      Put put = new Put(row2);
      put.add(fam1, qual1, value1);
      try {
        // ... (snippet truncated)
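The test above expects checkAndPut to reject a Put whose row differs from the checked row. For contrast, a well-formed client-side call, where the Put targets the checked row (row1, value1, and value2 are illustrative byte[] constants):

      Put put = new Put(row1);
      put.add(fam1, qual1, value2);
      // Atomically apply the Put only if row1's fam1:qual1 currently equals value1.
      boolean applied = table.checkAndPut(row1, fam1, qual1, value1, put);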
