Examples of HRegionLocation
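
These excerpts come from the HBase client, MapReduce integration, WAL replay, and hbck code, and they all exercise the same small HRegionLocation surface: constructing one from an HRegionInfo plus a ServerName, and reading back the region (getRegionInfo()) and the hosting server (getServerName(), getHostname(), getPort(), getHostnamePort()). The standalone sketch below pulls those calls together in one place; it assumes a 0.96-era HBase client and a reachable cluster, and the table and row names are purely illustrative.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.HRegionLocation;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.client.HTable;
import org.apache.hadoop.hbase.util.Bytes;

public class HRegionLocationSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // "example_table" and "some-row" are illustrative; any existing table and row will do.
    HTable table = new HTable(conf, "example_table");
    try {
      // Ask the client which region holds the row, and which server hosts that region.
      HRegionLocation loc = table.getRegionLocation(Bytes.toBytes("some-row"));
      HRegionInfo region = loc.getRegionInfo();  // region name, encoded name, start/end keys
      ServerName server = loc.getServerName();   // host, port and start code of the hosting server
      System.out.println(region.getRegionNameAsString()
          + " is hosted on " + server + " (" + loc.getHostnamePort() + ")");
    } finally {
      table.close();
    }
  }
}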


Examples of org.apache.hadoop.hbase.HRegionLocation

  /**
   * Deletes a table. Synchronous operation.
   *
   * @param tableName name of table to delete
   * @throws IOException if a remote or network exception occurs
   */
  public void deleteTable(final TableName tableName) throws IOException {
    HRegionLocation firstMetaServer = getFirstMetaServerForTable(tableName);
    boolean tableExists = true;

    executeCallable(new MasterCallable<Void>(getConnection()) {
      @Override
      public Void call() throws ServiceException {
        DeleteTableRequest req = RequestConverter.buildDeleteTableRequest(tableName);
        master.deleteTable(null, req);
        return null;
      }
    });

    // Wait until all regions deleted
    for (int tries = 0; tries < (this.numRetries * this.retryLongerMultiplier); tries++) {
      try {

        Scan scan = MetaReader.getScanForTableName(tableName);
        scan.addColumn(HConstants.CATALOG_FAMILY, HConstants.REGIONINFO_QUALIFIER);
        ScanRequest request = RequestConverter.buildScanRequest(
          firstMetaServer.getRegionInfo().getRegionName(), scan, 1, true);
        Result[] values = null;
        // Get a batch at a time.
        ClientService.BlockingInterface server = connection.getClient(firstMetaServer
            .getServerName());
        PayloadCarryingRpcController controller = new PayloadCarryingRpcController();
        try {
          controller.setPriority(tableName);
          ScanResponse response = server.scan(controller, request);
View Full Code Here

Examples of org.apache.hadoop.hbase.HRegionLocation

    HTable t = TEST_UTIL.createTable(nameBytes, HConstants.CATALOG_FAMILY);
    TEST_UTIL.createMultiRegions(t, HConstants.CATALOG_FAMILY);
    CatalogTracker ct = new CatalogTracker(TEST_UTIL.getConfiguration());
    ct.start();
    try {
      HRegionLocation regionLocation = t.getRegionLocation("mmm");
      HRegionInfo region = regionLocation.getRegionInfo();
      byte[] regionName = region.getRegionName();
      Pair<HRegionInfo, ServerName> pair = admin.getRegion(regionName, ct);
      assertTrue(Bytes.equals(regionName, pair.getFirst().getRegionName()));
      pair = admin.getRegion(region.getEncodedNameAsBytes(), ct);
      assertTrue(Bytes.equals(regionName, pair.getFirst().getRegionName()));
View Full Code Here

Examples of org.apache.hadoop.hbase.HRegionLocation

          continue;
        }

        Map<byte[], Long> maxStoreSequenceIds = null;
        boolean needSkip = false;
        HRegionLocation loc = null;
        String locKey = null;
        List<KeyValue> kvs = edit.getKeyValues();
        List<KeyValue> skippedKVs = new ArrayList<KeyValue>();
        HConnection hconn = this.getConnectionByTableName(table);

        for (KeyValue kv : kvs) {
          // filtering HLog meta entries
          // We don't handle HBASE-2231 because we may or may not replay a compaction event.
          // Details at https://issues.apache.org/jira/browse/HBASE-2231?focusedCommentId=13647143&
          // page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel#comment-13647143
          if (kv.matchingFamily(WALEdit.METAFAMILY)) {
            skippedKVs.add(kv);
            continue;
          }

          try {
            loc =
                locateRegionAndRefreshLastFlushedSequenceId(hconn, table, kv.getRow(),
                  encodeRegionNameStr);
          } catch (TableNotFoundException ex) {
            // table has been deleted so skip edits of the table
            LOG.info("Table " + table + " doesn't exist. Skip log replay for region "
                + encodeRegionNameStr);
            lastFlushedSequenceIds.put(encodeRegionNameStr, Long.MAX_VALUE);
            if (nonExistentTables == null) {
              nonExistentTables = new TreeSet<TableName>();
            }
            nonExistentTables.add(table);
            this.skippedEdits.incrementAndGet();
            needSkip = true;
            break;
          }

          cachedLastFlushedSequenceId =
              lastFlushedSequenceIds.get(loc.getRegionInfo().getEncodedName());
          if (cachedLastFlushedSequenceId != null
              && cachedLastFlushedSequenceId >= entry.getKey().getLogSeqNum()) {
            // skip the whole HLog entry
            this.skippedEdits.incrementAndGet();
            needSkip = true;
            break;
          } else {
            if (maxStoreSequenceIds == null) {
              maxStoreSequenceIds =
                  regionMaxSeqIdInStores.get(loc.getRegionInfo().getEncodedName());
            }
            if (maxStoreSequenceIds != null) {
              Long maxStoreSeqId = maxStoreSequenceIds.get(kv.getFamily());
              if (maxStoreSeqId == null || maxStoreSeqId >= entry.getKey().getLogSeqNum()) {
                // skip current kv if column family doesn't exist anymore or already flushed
                skippedKVs.add(kv);
                continue;
              }
            }
          }
        }

        // skip the edit
        if (loc == null || needSkip) continue;

        if (!skippedKVs.isEmpty()) {
          kvs.removeAll(skippedKVs);
        }
        synchronized (serverToBufferQueueMap) {
          locKey = loc.getHostnamePort() + KEY_DELIMITER + table;
          List<Pair<HRegionLocation, HLog.Entry>> queue = serverToBufferQueueMap.get(locKey);
          if (queue == null) {
            queue =
                Collections.synchronizedList(new ArrayList<Pair<HRegionLocation, HLog.Entry>>());
            serverToBufferQueueMap.put(locKey, queue);
          }
          queue.add(new Pair<HRegionLocation, HLog.Entry>(loc, entry));
        }
        // store regions we have recovered so far
        addToRecoveredRegions(loc.getRegionInfo().getEncodedName());
      }
    }
View Full Code Here

Examples of org.apache.hadoop.hbase.HRegionLocation

     * @throws IOException
     */
    private HRegionLocation locateRegionAndRefreshLastFlushedSequenceId(HConnection hconn,
        TableName table, byte[] row, String originalEncodedRegionName) throws IOException {
      // fetch location from cache
      HRegionLocation loc = onlineRegions.get(originalEncodedRegionName);
      if (loc != null) return loc;
      // fetch the location from hbase:meta directly, bypassing the cache, to avoid hitting an old dead server
      loc = hconn.getRegionLocation(table, row, true);
      if (loc == null) {
        throw new IOException("Can't locate location for row:" + Bytes.toString(row)
            + " of table:" + table);
      }
      // check if current row moves to a different region due to region merge/split
      if (!originalEncodedRegionName.equalsIgnoreCase(loc.getRegionInfo().getEncodedName())) {
        // originalEncodedRegionName should have already flushed
        lastFlushedSequenceIds.put(originalEncodedRegionName, Long.MAX_VALUE);
        HRegionLocation tmpLoc = onlineRegions.get(loc.getRegionInfo().getEncodedName());
        if (tmpLoc != null) return tmpLoc;
      }

      Long lastFlushedSequenceId = -1L;
      AtomicBoolean isRecovering = new AtomicBoolean(true);
View Full Code Here

Examples of org.apache.hadoop.hbase.HRegionLocation

    public void init(HConnection connection) {
    }

    @Override
    public HRegionLocation getMetaRegionLocation() throws IOException {
      return new HRegionLocation(HRegionInfo.FIRST_META_REGIONINFO, META_HOST);
    }
View Full Code Here

Examples of org.apache.hadoop.hbase.HRegionLocation

      context.getConfiguration().get("hbase.nameserver.address", null);
   
    Pair<byte[][], byte[][]> keys = table.getStartEndKeys();
    if (keys == null || keys.getFirst() == null ||
        keys.getFirst().length == 0) {
      HRegionLocation regLoc = table.getRegionLocation(HConstants.EMPTY_BYTE_ARRAY, false);
      if (null == regLoc) {
        throw new IOException("Expecting at least one region.");
      }
      List<InputSplit> splits = new ArrayList<InputSplit>(1);
      InputSplit split = new TableSplit(table.getName(),
          HConstants.EMPTY_BYTE_ARRAY, HConstants.EMPTY_BYTE_ARRAY, regLoc
              .getHostnamePort().split(Addressing.HOSTNAME_PORT_SEPARATOR)[0]);
      splits.add(split);
      return splits;
    }
    List<InputSplit> splits = new ArrayList<InputSplit>(keys.getFirst().length);
    for (int i = 0; i < keys.getFirst().length; i++) {
      if ( !includeRegionInSplit(keys.getFirst()[i], keys.getSecond()[i])) {
        continue;
      }
      HRegionLocation location = table.getRegionLocation(keys.getFirst()[i], false);
      // The below InetSocketAddress creation does a name resolution.
      InetSocketAddress isa = new InetSocketAddress(location.getHostname(), location.getPort());
      if (isa.isUnresolved()) {
        LOG.warn("Failed resolve " + isa);
      }
      InetAddress regionAddress = isa.getAddress();
      String regionLocation;
      try {
        regionLocation = reverseDNS(regionAddress);
      } catch (NamingException e) {
        LOG.error("Cannot resolve the host name for " + regionAddress + " because of " + e);
        regionLocation = location.getHostname();
      }

      byte[] startRow = scan.getStartRow();
      byte[] stopRow = scan.getStopRow();
      // determine if the given start and stop keys fall into the region
View Full Code Here

Examples of org.apache.hadoop.hbase.HRegionLocation

    List<HRegionLocation> hrls = new ArrayList<HRegionLocation>(NB_REGS);
    List<Get> gets = new ArrayList<Get>(NB_REGS);
    for (int i = 0; i < NB_REGS; i++) {
      HRegionInfo hri = new HRegionInfo(
          DUMMY_TABLE, Bytes.toBytes(i * 10L), Bytes.toBytes(i * 10L + 9L));
      HRegionLocation hrl = new HRegionLocation(hri, i % 2 == 0 ? sn : sn2);
      hrls.add(hrl);

      Get get = new Get(Bytes.toBytes(i * 10L));
      gets.add(get);
    }
View Full Code Here

Examples of org.apache.hadoop.hbase.HRegionLocation

    }
    List<byte[]> keysInRange = new ArrayList<byte[]>();
    List<HRegionLocation> regionsInRange = new ArrayList<HRegionLocation>();
    byte[] currentKey = startKey;
    do {
      HRegionLocation regionLocation = getRegionLocation(currentKey, reload);
      keysInRange.add(currentKey);
      regionsInRange.add(regionLocation);
      currentKey = regionLocation.getRegionInfo().getEndKey();
    } while (!Bytes.equals(currentKey, HConstants.EMPTY_END_ROW)
        && (endKeyIsEndOfTable || Bytes.compareTo(currentKey, endKey) < 0
            || (includeEndKey && Bytes.compareTo(currentKey, endKey) == 0)));
    return new Pair<List<byte[]>, List<HRegionLocation>>(keysInRange,
        regionsInRange);
View Full Code Here
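
The snippet above is the internal walk HTable uses to collect the keys and region locations covering a key range. A usage-level sketch of the public HTable.getRegionsInRange(startKey, endKey) call, assuming the same 0.96-era client API (the table name and row keys are illustrative):

import java.util.List;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HRegionLocation;
import org.apache.hadoop.hbase.client.HTable;
import org.apache.hadoop.hbase.util.Bytes;

public class RegionsInRangeSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    HTable table = new HTable(conf, "example_table");  // illustrative table name
    try {
      // One HRegionLocation per region overlapping the row range "row-000".."row-999".
      List<HRegionLocation> locations =
          table.getRegionsInRange(Bytes.toBytes("row-000"), Bytes.toBytes("row-999"));
      for (HRegionLocation loc : locations) {
        System.out.println(loc.getRegionInfo().getRegionNameAsString()
            + " -> " + loc.getHostnamePort());
      }
    } finally {
      table.close();
    }
  }
}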

Examples of org.apache.hadoop.hbase.HRegionLocation

  /**
   * Record the location of the hbase:meta region as found in ZooKeeper.
   */
  private boolean recordMetaRegion() throws IOException {
    HRegionLocation metaLocation = connection.locateRegion(
      TableName.META_TABLE_NAME, HConstants.EMPTY_START_ROW);

    // Check if Meta region is valid and existing
    if (metaLocation == null || metaLocation.getRegionInfo() == null ||
        metaLocation.getHostname() == null) {
      errors.reportError(ERROR_CODE.NULL_META_REGION,
        "META region or some of its attributes are null.");
      return false;
    }
    ServerName sn;
    try {
      sn = getMetaRegionServerName();
    } catch (KeeperException e) {
      throw new IOException(e);
    }
    MetaEntry m = new MetaEntry(metaLocation.getRegionInfo(), sn, System.currentTimeMillis());
    HbckInfo hbckInfo = regionInfoMap.get(metaLocation.getRegionInfo().getEncodedName());
    if (hbckInfo == null) {
      regionInfoMap.put(metaLocation.getRegionInfo().getEncodedName(), new HbckInfo(m));
    } else {
      hbckInfo.metaEntry = m;
    }
    return true;
  }
View Full Code Here
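
The same connection-level lookup hbck issues above can be used on its own to find where hbase:meta currently lives. A minimal sketch, assuming HConnectionManager.createConnection(conf) is available to obtain the HConnection (as in the 0.96-era client these snippets target):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HRegionLocation;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.HConnection;
import org.apache.hadoop.hbase.client.HConnectionManager;

public class MetaLocationSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    HConnection connection = HConnectionManager.createConnection(conf);
    try {
      // Same call recordMetaRegion() makes above: locate the hbase:meta region.
      HRegionLocation metaLocation = connection.locateRegion(
          TableName.META_TABLE_NAME, HConstants.EMPTY_START_ROW);
      System.out.println("hbase:meta is on " + metaLocation.getHostnamePort());
    } finally {
      connection.close();
    }
  }
}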

Examples of org.apache.hadoop.hbase.HRegionLocation

    Put p = new Put(FAM_NAM);
    p.add(FAM_NAM, FAM_NAM, FAM_NAM);
    table.put(p);

    final HConnectionImplementation hci =  (HConnectionImplementation)table.getConnection();
    final HRegionLocation loc = table.getRegionLocation(FAM_NAM);

    Get get = new Get(FAM_NAM);
    Assert.assertNotNull(table.get(get));

    get = new Get(FAM_NAM);
    get.setFilter(new BlockingFilter());

    // This thread will mark the server as dead while we're waiting during a get.
    Thread t = new Thread() {
      @Override
      public void run() {
        synchronized (syncBlockingFilter) {
          try {
            syncBlockingFilter.wait();
          } catch (InterruptedException e) {
            throw new RuntimeException(e);
          }
        }
        hci.clusterStatusListener.deadServerHandler.newDead(loc.getServerName());
      }
    };

    t.start();
    try {
View Full Code Here