Package org.apache.hadoop.hbase

Examples of org.apache.hadoop.hbase.HMsg
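HMsg is the unit of communication between the HMaster and the region servers in this generation of HBase; each message pairs an HMsg.Type with the HRegionInfo it concerns, plus an optional payload. Before the fragments, here is a minimal sketch of building and inspecting messages, assembled only from the constructors and accessors that appear on this page; the class name, method, and payload strings are illustrative placeholders, not HBase API:

import org.apache.hadoop.hbase.HMsg;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.io.Text;

public class HMsgSketch {
  // regionInfo stands in for whatever region the message concerns.
  static void buildAndInspect(final HRegionInfo regionInfo) {
    // Type + region, as in the MSG_REPORT_OPEN calls below.
    HMsg open = new HMsg(HMsg.Type.MSG_REGION_OPEN, regionInfo);

    // Type + region + Text payload, as in reportClose() and reportSplit().
    HMsg close = new HMsg(HMsg.Type.MSG_REPORT_CLOSE, regionInfo,
        new Text("illustrative payload"));

    // Type + region + byte[] payload, as in the duplicate-assignment fragment.
    HMsg dup = new HMsg(HMsg.Type.MSG_REGION_CLOSE_WITHOUT_REPORT, regionInfo,
        "Duplicate assignment".getBytes());

    // Receivers dispatch on the message type via getType() or isType().
    if (open.isType(HMsg.Type.MSG_REGION_OPEN)) {
      System.out.println(open.getType() + " for " +
          open.getRegionInfo().getRegionNameAsString());
    }
  }
}

The first fragment below queues a MSG_REPORT_CLOSE for a region that has just been closed; a null message payload is allowed.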


    // Tail of a caller: queue a close report with no accompanying message.
    reportClose(region, null);
  }

  /* Add a MSG_REPORT_CLOSE for the region to the outbound message buffer.
   * The message text may be null. */
  private void reportClose(final HRegionInfo region, final Text message) {
    outboundMsgs.add(new HMsg(HMsg.Type.MSG_REPORT_CLOSE, region, message));
  }
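Reporting a split: one MSG_REPORT_SPLIT is queued for the old region, with a Text payload naming the daughter regions, followed by a MSG_REPORT_OPEN for each daughter.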


  /**
   * Notify the master that a region has split into two daughter regions.
   * The master learns of the change to the old region when it performs its
   * next rescan of the root or meta tables.
   */
  void reportSplit(HRegionInfo oldRegion, HRegionInfo newRegionA,
      HRegionInfo newRegionB) {
    outboundMsgs.add(new HMsg(HMsg.Type.MSG_REPORT_SPLIT, oldRegion,
      new Text(oldRegion.getRegionNameAsString() + " split; daughters: " +
        newRegionA.getRegionNameAsString() + ", " +
        newRegionB.getRegionNameAsString())));
    outboundMsgs.add(new HMsg(HMsg.Type.MSG_REPORT_OPEN, newRegionA));
    outboundMsgs.add(new HMsg(HMsg.Type.MSG_REPORT_OPEN, newRegionB));
  }
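Queueing a MSG_REPORT_PROCESS_OPEN so the master knows that a region open is still in progress: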

  /**
   * Queue a MSG_REPORT_PROCESS_OPEN for the region. This is called while the
   * region is still queued for opening,
   * and then while the region is being opened, it is called from the Worker
   * thread that is running the region open.
   * @param hri Region to add the message for
   */
  protected void addProcessingMessage(final HRegionInfo hri) {
    getOutboundMsgs().add(new HMsg(HMsg.Type.MSG_REPORT_PROCESS_OPEN, hri));
  }
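From the master's assignment logic: record the assignment, queue a MSG_REGION_OPEN for the chosen server, and stop once the requested number of regions has been handed out.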

        LOG.info("assigning region " +
          Bytes.toString(regionInfo.getRegionName()) +
          " to server " + serverName);
        unassignedRegions.put(regionInfo, Long.valueOf(now));
        this.historian.addRegionAssignment(regionInfo, serverName);
        returnMsgs.add(new HMsg(HMsg.Type.MSG_REGION_OPEN, regionInfo));
        if (--nregions <= 0) {
          break;
        }
      }
    }
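The same pattern when there is only one region server to assign to: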

      LOG.info("assigning region " +
          Bytes.toString(regionInfo.getRegionName()) +
          " to the only server " + serverName);
      unassignedRegions.put(regionInfo, Long.valueOf(now));
      this.historian.addRegionAssignment(regionInfo, serverName);
      returnMsgs.add(new HMsg(HMsg.Type.MSG_REGION_OPEN, regionInfo));
    }
  }
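Shedding load: queue a MSG_REGION_CLOSE carrying the OVERLOADED payload for a region being moved off an overloaded server, then mark the region as closing.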

      }
     
      LOG.debug("Going to close region " + currentRegion.getRegionNameAsString());
     
      // make a message to close the region
      returnMsgs.add(new HMsg(HMsg.Type.MSG_REGION_CLOSE, currentRegion,
        OVERLOADED));
      // mark the region as closing
      setClosing(currentRegion.getRegionName());
      // increment the count of regions we've marked
      regionsClosed++;
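Telling a region server to close every region previously marked for closing, transitioning each one to the pending-close state: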

    synchronized (master.regionManager) {
      // Tell the region server to close regions that we have marked for closing.
      for (HRegionInfo i:
        master.regionManager.getMarkedToClose(serverInfo.getServerName())) {
        returnMsgs.add(new HMsg(HMsg.Type.MSG_REGION_CLOSE, i));
        // Transition the region from toClose to closing state
        master.regionManager.setPendingClose(i.getRegionNameAsString());
      }

     
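Handling a duplicate assignment: the server is asked to close the region without reporting the close, so the master does not try to reopen it elsewhere.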

        // This Region should not have been opened.
        // Ask the server to shut it down, but don't report it as closed. 
        // Otherwise the HMaster will think the Region was closed on purpose,
        // and then try to reopen it elsewhere; that's not what we want.
        returnMsgs.add(new HMsg(HMsg.Type.MSG_REGION_CLOSE_WITHOUT_REPORT,
            region, "Duplicate assignment".getBytes()));
      } else {
        if (region.isRootRegion()) {
          // it was assigned, and it's not a duplicate assignment, so take it out
          // of the unassigned list.
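The region server's main report loop. Each reporting interval it snapshots and clears its outbound message buffer, sends its load to the master via regionServerReport(), and then acts on the HMsg instructions the master returns: restarting after MSG_CALL_SERVER_STARTUP, stopping on MSG_REGIONSERVER_STOP, quiescing on MSG_REGIONSERVER_QUIESCE, and queueing everything else onto the toDo list. On a clean (non-abort) shutdown it closes its regions and sends a final batch headed by REPORT_EXITING, with one MSG_REPORT_CLOSE per closed region.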

          // It has been way too long since we last reported to the master.
          LOG.warn("unable to report to master for " + (now - lastMsg) +
            " milliseconds - retrying");
        }
        if ((now - lastMsg) >= msgInterval) {
          HMsg[] outboundArray = null;
          synchronized(this.outboundMsgs) {
            outboundArray =
              this.outboundMsgs.toArray(new HMsg[outboundMsgs.size()]);
            this.outboundMsgs.clear();
          }
          try {
            doMetrics();
            MemoryUsage memory =
                ManagementFactory.getMemoryMXBean().getHeapMemoryUsage();
            HServerLoad hsl = new HServerLoad(requestCount.get(),
              (int)(memory.getUsed()/1024/1024),
              (int)(memory.getMax()/1024/1024));
            for (HRegion r: onlineRegions.values()) {
              hsl.addRegionInfo(createRegionLoad(r));
            }
            this.serverInfo.setLoad(hsl);
            this.requestCount.set(0);
            HMsg[] msgs = hbaseMaster.regionServerReport(
              serverInfo, outboundArray, getMostLoadedRegions());
            lastMsg = System.currentTimeMillis();
            if (this.quiesced.get() && onlineRegions.size() == 0) {
              // We've just told the master we're exiting because we aren't
              // serving any regions. So set the stop bit and exit.
              LOG.info("Server quiesced and not serving any regions. " +
                "Starting shutdown");
              stopRequested.set(true);
              this.outboundMsgs.clear();
              continue;
            }

            // Queue up the HMaster's instruction stream for processing
            boolean restart = false;
            for(int i = 0;
                !restart && !stopRequested.get() && i < msgs.length;
                i++) {
              LOG.info(msgs[i].toString());
              if (safeMode.get()) {
                if (zooKeeperWrapper.checkOutOfSafeMode()) {
                  this.connection.unsetRootRegionLocation();
                  synchronized (safeMode) {
                    safeMode.set(false);
                    safeMode.notifyAll();
                  }
                }
              }
              switch(msgs[i].getType()) {
              case MSG_CALL_SERVER_STARTUP:
                // We get MSG_CALL_SERVER_STARTUP on startup, but we can also
                // get it when the master is panicking because, for instance,
                // HDFS has been yanked out from under it.  Be wary of
                // this message.
                if (checkFileSystem()) {
                  closeAllRegions();
                  try {
                    hlog.closeAndDelete();
                  } catch (Exception e) {
                    LOG.error("error closing and deleting HLog", e);
                  }
                  try {
                    serverInfo.setStartCode(System.currentTimeMillis());
                    hlog = setupHLog();
                    this.hlogFlusher.setHLog(hlog);
                  } catch (IOException e) {
                    this.abortRequested = true;
                    this.stopRequested.set(true);
                    e = RemoteExceptionHandler.checkIOException(e);
                    LOG.fatal("error restarting server", e);
                    break;
                  }
                  reportForDuty();
                  restart = true;
                } else {
                  LOG.fatal("file system available check failed. " +
                  "Shutting down server.");
                }
                break;

              case MSG_REGIONSERVER_STOP:
                stopRequested.set(true);
                break;

              case MSG_REGIONSERVER_QUIESCE:
                if (!quiesceRequested) {
                  try {
                    toDo.put(new ToDoEntry(msgs[i]));
                  } catch (InterruptedException e) {
                    throw new RuntimeException("Putting into msgQueue was " +
                        "interrupted.", e);
                  }
                  quiesceRequested = true;
                }
                break;

              default:
                if (fsOk) {
                  try {
                    toDo.put(new ToDoEntry(msgs[i]));
                  } catch (InterruptedException e) {
                    throw new RuntimeException("Putting into msgQueue was " +
                        "interrupted.", e);
                  }
                }
              }
            }
            // Reset tries count if we had a successful transaction.
            tries = 0;

            if (restart || this.stopRequested.get()) {
              toDo.clear();
              continue;
            }
          } catch (Exception e) {
            if (e instanceof IOException) {
              e = RemoteExceptionHandler.checkIOException((IOException) e);
            }
            if (tries < this.numRetries) {
              LOG.warn("Processing message (Retry: " + tries + ")", e);
              tries++;
            } else {
              LOG.error("Exceeded max retries: " + this.numRetries, e);
              if (checkFileSystem()) {
                // Filesystem is OK.  Something is up w/ ZK or master.  Sleep
                // a little while if only to stop our logging many times a
                // millisecond.
                Thread.sleep(1000);
              }
            }
            if (this.stopRequested.get()) {
                LOG.info("Stop was requested, clearing the toDo " +
                        "despite of the exception");
                toDo.clear();
                continue;
            }
          }
        }
        // Do some housekeeping before going to sleep
        housekeeping();
        sleeper.sleep(lastMsg);
      } // for
    } catch (Throwable t) {
      if (!checkOOME(t)) {
        LOG.fatal("Unhandled exception. Aborting...", t);
        abort();
      }
    }
    RegionHistorian.getInstance().offline();
    this.leases.closeAfterLeasesExpire();
    this.worker.stop();
    this.server.stop();
    if (this.infoServer != null) {
      LOG.info("Stopping infoServer");
      try {
        this.infoServer.stop();
      } catch (Exception e) {
        e.printStackTrace();
      }
    }
    // Send cache a shutdown.
    LruBlockCache c = (LruBlockCache)StoreFile.getBlockCache(this.conf);
    if (c != null) c.shutdown();

    // Send interrupts to wake up threads if sleeping so they notice shutdown.
    // TODO: Should we check they are alive?  If OOME could have exited already
    cacheFlusher.interruptIfNecessary();
    hlogFlusher.interrupt();
    compactSplitThread.interruptIfNecessary();
    hlogRoller.interruptIfNecessary();
    this.majorCompactionChecker.interrupt();

    if (abortRequested) {
      if (this.fsOk) {
        // Only try to clean up if the file system is available
        try {
          if (this.hlog != null) {
            this.hlog.close();
            LOG.info("On abort, closed hlog");
          }
        } catch (Throwable e) {
          LOG.error("Unable to close log in abort",
            RemoteExceptionHandler.checkThrowable(e));
        }
        closeAllRegions(); // Don't leave any open file handles
      }
      LOG.info("aborting server at: " +
        serverInfo.getServerAddress().toString());
    } else {
      ArrayList<HRegion> closedRegions = closeAllRegions();
      try {
        if (this.hlog != null) {
          hlog.closeAndDelete();
        }
      } catch (Throwable e) {
        LOG.error("Close and delete failed",
          RemoteExceptionHandler.checkThrowable(e));
      }
      try {
        HMsg[] exitMsg = new HMsg[closedRegions.size() + 1];
        exitMsg[0] = REPORT_EXITING;
        // Tell the master what regions we are/were serving
        int i = 1;
        for (HRegion region: closedRegions) {
          exitMsg[i++] = new HMsg(HMsg.Type.MSG_REPORT_CLOSE,
              region.getRegionInfo());
        }

        LOG.info("telling master that region server is shutting down at: " +
            serverInfo.getServerAddress().toString());
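Replaying the toDo queue: for every pending MSG_REGION_OPEN, the server re-queues a processing message so the master knows the open is still underway.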

      return;
    }
    // This iterator is 'safe'.  We are guaranteed a view of the queue's state
    // at the time the iterator was taken out.  Iteration apparently goes from
    // oldest to newest.
    for (ToDoEntry e: this.toDo) {
      HMsg msg = e.msg;
      if (msg == null) {
        LOG.warn("Message is empty: " + e);
        continue; // Skip the entry; e.msg.isType() below would otherwise NPE.
      }
      if (msg.isType(HMsg.Type.MSG_REGION_OPEN)) {
        addProcessingMessage(msg.getRegionInfo());
View Full Code Here
