Package org.apache.hadoop.hbase

Examples of org.apache.hadoop.hbase.TableName
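Before the excerpts, here is a minimal, self-contained sketch of the TableName value class itself. The class and table names below are illustrative, not taken from the excerpts, and it assumes the 0.96-era API used throughout this page:

import org.apache.hadoop.hbase.TableName;

public class TableNameBasics {
  public static void main(String[] args) {
    // Parse a plain string; with no "namespace:" prefix the default namespace is assumed.
    TableName plain = TableName.valueOf("myTable");

    // Build a name with an explicit namespace.
    TableName namespaced = TableName.valueOf("myNamespace", "myTable");

    System.out.println(plain.getNameAsString());            // myTable
    System.out.println(namespaced.getNameAsString());       // myNamespace:myTable
    System.out.println(namespaced.getNamespaceAsString());  // myNamespace
    System.out.println(namespaced.getQualifierAsString());  // myTable
  }
}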


  /**
   * Test that the -noHdfsChecking option can detect and fix assignment issues.
   */
  @Test
  public void testFixAssignmentsAndNoHdfsChecking() throws Exception {
    TableName table =
        TableName.valueOf("testFixAssignmentsAndNoHdfsChecking");
    try {
      setupTable(table);
      assertEquals(ROWKEYS.length, countRows());



   * However, it cannot fix it without checking HDFS, because we need to get
   * the region info from HDFS in this case and then patch the meta table.
   */
  @Test
  public void testFixMetaNotWorkingWithNoHdfsChecking() throws Exception {
    TableName table =
        TableName.valueOf("testFixMetaNotWorkingWithNoHdfsChecking");
    try {
      setupTable(table);
      assertEquals(ROWKEYS.length, countRows());


   * Test that -fixHdfsHoles does not work with the -noHdfsChecking option,
   * and that -noHdfsChecking cannot detect an orphan HDFS region.
   */
  @Test
  public void testFixHdfsHolesNotWorkingWithNoHdfsChecking() throws Exception {
    TableName table =
        TableName.valueOf("testFixHdfsHolesNotWorkingWithNoHdfsChecking");
    try {
      setupTable(table);
      assertEquals(ROWKEYS.length, countRows());


    testSplitBeforeSettingSplittingInZKInternals();
  }

  @Test(timeout = 60000)
  public void testTableExistsIfTheSpecifiedTableRegionIsSplitParent() throws Exception {
    final TableName tableName =
        TableName.valueOf("testTableExistsIfTheSpecifiedTableRegionIsSplitParent");
    // Create table then get the single region for our new table.
    HTable t = createTableAndWait(tableName.getName(), CF);
    try {
      List<HRegion> regions = cluster.getRegions(tableName);
      int regionServerIndex = cluster.getServerWith(regions.get(0).getRegionName());
      HRegionServer regionServer = cluster.getRegionServer(regionServerIndex);
      insertData(tableName.getName(), admin, t);
      // Turn off balancer so it doesn't cut in and mess up our placements.
      admin.setBalancerRunning(false, true);
      // Turn off the meta scanner (catalog janitor) so it doesn't remove the parent region on us.
      cluster.getMaster().setCatalogJanitorEnabled(false);
      boolean tableExists = MetaReader.tableExists(regionServer.getCatalogTracker(),
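The split-parent test above mixes byte[]-based helpers (createTableAndWait and insertData take the raw name bytes from tableName.getName()) with TableName-based calls such as cluster.getRegions(tableName). A minimal sketch of that byte[] <-> TableName round trip; the class and table names here are illustrative:

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.util.Bytes;

public class NameRoundTrip {
  public static void main(String[] args) {
    TableName tableName = TableName.valueOf("demo");

    // getName() yields the fully qualified name as bytes, which suits the
    // older byte[]-based helpers used in the test above.
    byte[] asBytes = tableName.getName();

    // Parsing the bytes gives back an equal TableName.
    TableName parsed = TableName.valueOf(asBytes);

    System.out.println(Bytes.toString(asBytes));   // demo
    System.out.println(parsed.equals(tableName));  // true
  }
}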

  /**
   * This creates a table and then corrupts an hfile.  Hbck should quarantine the file.
   */
  @Test(timeout=120000)
  public void testQuarantineCorruptHFile() throws Exception {
    TableName table = TableName.valueOf(name.getMethodName());
    try {
      setupTable(table);
      assertEquals(ROWKEYS.length, countRows());
      TEST_UTIL.getHBaseAdmin().flush(table.getName()); // flush is async.

      FileSystem fs = FileSystem.get(conf);
      Path hfile = getFlushedHFile(fs, table);

      // Mess it up by leaving a hole in the assignment, meta, and hdfs data
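The quarantine test above names its table after the currently running JUnit method (TableName.valueOf(name.getMethodName())), which keeps every test on its own table. A minimal sketch of that pattern, assuming JUnit 4's TestName rule; the class and test method below are hypothetical:

import org.apache.hadoop.hbase.TableName;
import org.junit.Rule;
import org.junit.Test;
import org.junit.rules.TestName;

public class PerMethodTableNameExample {
  // JUnit rule that exposes the name of the currently running test method.
  @Rule
  public TestName name = new TestName();

  @Test
  public void testSomething() {
    // Each test method gets its own table, so tests cannot collide on data.
    TableName table = TableName.valueOf(name.getMethodName());
    System.out.println(table.getNameAsString()); // testSomething
  }
}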

   * This creates a table and simulates the race situation where a concurrent compaction or split
   * has removed an hfile after the corruption checker learned about it.
   */
  @Test(timeout=120000)
  public void testQuarantineMissingHFile() throws Exception {
    TableName table = TableName.valueOf(name.getMethodName());
    ExecutorService exec = new ScheduledThreadPoolExecutor(10);
    // Inject a fault into the HFileCorruptionChecker (hfcc) created by this hbck instance.
    final FileSystem fs = FileSystem.get(conf);
    HBaseFsck hbck = new HBaseFsck(conf, exec) {
      @Override

    public void deleteCachedRegionLocation(final HRegionLocation location) {
      if (location == null) {
        return;
      }
      synchronized (this.cachedRegionLocations) {
        TableName tableName = location.getRegionInfo().getTable();
        Map<byte[], HRegionLocation> tableLocations =
            getTableLocations(tableName);
        if (!tableLocations.isEmpty()) {
          // Delete if there's something in the cache for this region.
          HRegionLocation removedLocation =
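The cache above is keyed by the TableName recovered from the location's HRegionInfo. Because TableName defines value-based equals() and hashCode(), it can be used directly as a map key; a stripped-down sketch of that idea (the class below is hypothetical, not the connection cache itself):

import java.util.HashMap;
import java.util.Map;

import org.apache.hadoop.hbase.TableName;

public class TableKeyedCache {
  private final Map<TableName, Integer> regionCounts = new HashMap<TableName, Integer>();

  public void increment(TableName tableName) {
    Integer current = regionCounts.get(tableName);
    regionCounts.put(tableName, current == null ? 1 : current + 1);
  }

  public int count(TableName tableName) {
    Integer current = regionCounts.get(tableName);
    return current == null ? 0 : current;
  }

  public static void main(String[] args) {
    TableKeyedCache cache = new TableKeyedCache();
    cache.increment(TableName.valueOf("t1"));
    // A freshly parsed instance is equal to the one stored above.
    System.out.println(cache.count(TableName.valueOf("t1"))); // prints 1
  }
}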

    kvs.add(kv);

    HLog.Writer writer = null;
    HLog.Reader reader = null;
    // a regular table
    TableName t = TableName.valueOf("t");
    HRegionInfo tRegionInfo = null;
    int logCount = 0;
    long timestamp = System.currentTimeMillis();
    Path path = new Path(dir, "t");
    try {
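The WAL test above works with a regular table named "t" and an HRegionInfo for it. A minimal sketch of how both the table descriptor and the region metadata are keyed by TableName (class name hypothetical, assuming the 0.96-era HTableDescriptor and HRegionInfo constructors):

import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.TableName;

public class RegionInfoFromTableName {
  public static void main(String[] args) {
    TableName t = TableName.valueOf("t");

    // Both the descriptor and the region metadata carry the TableName.
    HTableDescriptor descriptor = new HTableDescriptor(t);
    HRegionInfo regionInfo = new HRegionInfo(t); // a single region spanning the whole key space

    System.out.println(descriptor.getTableName()); // t
    System.out.println(regionInfo.getTable());     // t
  }
}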

          throw new NoServerForRegionException(Bytes.toStringBinary(tableNameOrRegionName));
        } else {
          flush(regionServerPair.getSecond(), regionServerPair.getFirst());
        }
      } else {
        final TableName tableName = checkTableExists(
            TableName.valueOf(tableNameOrRegionName), ct);
        List<Pair<HRegionInfo, ServerName>> pairs =
          MetaReader.getTableRegionsAndLocations(ct,
              tableName);
        for (Pair<HRegionInfo, ServerName> pair: pairs) {
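The flush excerpt above and the compact excerpt that follows share one disambiguation pattern: the byte[] argument is first tried as a region name, and only if no region matches is it parsed as a TableName so the operation can be applied to every region of the table. A stripped-down sketch of that branch with the region lookup stubbed out (the class and helper below are hypothetical):

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.util.Bytes;

public class NameDisambiguation {
  // Hypothetical stand-in for the region lookup the admin performs before
  // falling back to a table-wide operation.
  static boolean isKnownRegion(byte[] nameBytes) {
    return false; // pretend no region with this exact name was found
  }

  public static void main(String[] args) {
    byte[] tableNameOrRegionName = Bytes.toBytes("myTable");

    if (isKnownRegion(tableNameOrRegionName)) {
      System.out.println("operate on the single matching region");
    } else {
      // Fall back: treat the bytes as a fully qualified table name and
      // act on every region of that table.
      TableName tableName = TableName.valueOf(tableNameOrRegionName);
      System.out.println("operate on all regions of " + tableName);
    }
  }
}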

          throw new NoServerForRegionException(Bytes.toStringBinary(tableNameOrRegionName));
        } else {
          compact(regionServerPair.getSecond(), regionServerPair.getFirst(), major, columnFamily);
        }
      } else {
        final TableName tableName =
            checkTableExists(TableName.valueOf(tableNameOrRegionName), ct);
        List<Pair<HRegionInfo, ServerName>> pairs =
          MetaReader.getTableRegionsAndLocations(ct,
              tableName);
        for (Pair<HRegionInfo, ServerName> pair: pairs) {
