Package org.apache.hadoop.hbase.master

Examples of org.apache.hadoop.hbase.master.HMaster


   */
  public static JVMClusterUtil.MasterThread createMasterThread(
      final Configuration c, final Class<? extends HMaster> hmc,
      final int index)
  throws IOException {
    HMaster server;
    try {
      server = hmc.getConstructor(Configuration.class).newInstance(c);
    } catch (InvocationTargetException ite) {
      Throwable target = ite.getTargetException();
      throw new RuntimeException("Failed construction of Master: " +
View Full Code Here


  /**
   * Expire the Master's session
   * @throws Exception
   */
  public void expireMasterSession() throws Exception {
    HMaster master = hbaseCluster.getMaster();
    expireSession(master.getZooKeeper(), master);
  }
View Full Code Here

      session = pageContext.getSession();
      out = pageContext.getOut();
      _jspx_out = out;


  HMaster master = (HMaster)getServletContext().getAttribute(HMaster.MASTER);
  Configuration conf = master.getConfiguration();
  HServerAddress rootLocation = master.getRegionManager().getRootRegionLocation();
  Map<byte [], MetaRegion> onlineRegions = master.getRegionManager().getOnlineMetaRegions();
  Map<String, HServerInfo> serverToServerInfos =
    master.getServerManager().getServersToServerInfo();
  int interval = conf.getInt("hbase.regionserver.msginterval", 1000)/1000;
  if (interval == 0) {
      interval = 1;
  }
  boolean showFragmentation = conf.getBoolean("hbase.master.ui.fragmentation.enabled", false);
  Map<String, Integer> frags = null;
  if (showFragmentation) {
      frags = master.getTableFragmentation();
  }

      out.write("<?xml version=\"1.0\" encoding=\"UTF-8\" ?>\n<!DOCTYPE html PUBLIC \"-//W3C//DTD XHTML 1.0 Transitional//EN\" \n  \"http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd\"> \n<html xmlns=\"http://www.w3.org/1999/xhtml\">\n<head><meta http-equiv=\"Content-Type\" content=\"text/html;charset=UTF-8\"/>\n<title>HBase Master: ");
      out.print( master.getMasterAddress().getHostname());
      out.write(':');
      out.print( master.getMasterAddress().getPort() );
      out.write("</title>\n<link rel=\"stylesheet\" type=\"text/css\" href=\"/static/hbase.css\" />\n</head>\n<body>\n<a id=\"logo\" href=\"http://wiki.apache.org/lucene-hadoop/Hbase\"><img src=\"/static/hbase_logo_med.gif\" alt=\"HBase Logo\" title=\"HBase Logo\" /></a>\n<h1 id=\"page_title\">Master: ");
      out.print(master.getMasterAddress().getHostname());
      out.write(':');
      out.print(master.getMasterAddress().getPort());
      out.write("</h1>\n<p id=\"links_menu\"><a href=\"/logs/\">Local logs</a>, <a href=\"/stacks\">Thread Dump</a>, <a href=\"/logLevel\">Log Level</a></p>\n\n");
if (JvmVersion.isBadJvmVersion()) {
      out.write("\n  <div class=\"warning\">\n  Your current JVM version ");
      out.print( System.getProperty("java.version") );
      out.write(" is known to be\n  unstable with HBase. Please see the\n  <a href=\"http://wiki.apache.org/hadoop/Hbase/Troubleshooting#A18\">HBase wiki</a>\n  for details.\n  </div>\n");
}
      out.write("\n\n<hr id=\"head_rule\" />\n\n<h2>Master Attributes</h2>\n<table>\n<tr><th>Attribute Name</th><th>Value</th><th>Description</th></tr>\n<tr><td>HBase Version</td><td>");
      out.print( org.apache.hadoop.hbase.util.VersionInfo.getVersion() );
      out.write(',');
      out.write(' ');
      out.write('r');
      out.print( org.apache.hadoop.hbase.util.VersionInfo.getRevision() );
      out.write("</td><td>HBase version and svn revision</td></tr>\n<tr><td>HBase Compiled</td><td>");
      out.print( org.apache.hadoop.hbase.util.VersionInfo.getDate() );
      out.write(',');
      out.write(' ');
      out.print( org.apache.hadoop.hbase.util.VersionInfo.getUser() );
      out.write("</td><td>When HBase version was compiled and by whom</td></tr>\n<tr><td>Hadoop Version</td><td>");
      out.print( org.apache.hadoop.util.VersionInfo.getVersion() );
      out.write(',');
      out.write(' ');
      out.write('r');
      out.print( org.apache.hadoop.util.VersionInfo.getRevision() );
      out.write("</td><td>Hadoop version and svn revision</td></tr>\n<tr><td>Hadoop Compiled</td><td>");
      out.print( org.apache.hadoop.util.VersionInfo.getDate() );
      out.write(',');
      out.write(' ');
      out.print( org.apache.hadoop.util.VersionInfo.getUser() );
      out.write("</td><td>When Hadoop version was compiled and by whom</td></tr>\n<tr><td>HBase Root Directory</td><td>");
      out.print( master.getRootDir().toString() );
      out.write("</td><td>Location of HBase home directory</td></tr>\n<tr><td>Load average</td><td>");
      out.print( master.getServerManager().getAverageLoad() );
      out.write("</td><td>Average number of regions per regionserver. Naive computation.</td></tr>\n<tr><td>Regions On FS</td><td>");
      out.print( master.getRegionManager().countRegionsOnFS() );
      out.write("</td><td>Number of regions on FileSystem. Rough count.</td></tr>\n");
  if (showFragmentation) {
      out.write("\n        <tr><td>Fragmentation</td><td>");
      out.print( frags.get("-TOTAL-") != null ? frags.get("-TOTAL-").intValue() + "%" : "n/a" );
      out.write("</td><td>Overall fragmentation of all tables, including .META. and -ROOT-.</td></tr>\n");
  }
      out.write("\n<tr><td>Zookeeper Quorum</td><td>");
      out.print( master.getZooKeeperWrapper().getQuorumServers() );
      out.write("</td><td>Addresses of all registered ZK servers. For more, see <a href=\"/zk.jsp\">zk dump</a>.</td></tr>\n</table>\n\n<h2>Catalog Tables</h2>\n");
  if (rootLocation != null) {
      out.write("\n<table>\n<tr>\n    <th>Table</th>\n");
  if (showFragmentation) {
View Full Code Here

      session = pageContext.getSession();
      out = pageContext.getOut();
      _jspx_out = out;


  HMaster master = (HMaster)getServletContext().getAttribute(HMaster.MASTER);
  Configuration conf = master.getConfiguration();
  HBaseAdmin hbadmin = new HBaseAdmin(conf);
  String tableName = request.getParameter("name");
  HTable table = new HTable(conf, tableName);
  String tableHeader = "<h2>Table Regions</h2><table><tr><th>Name</th><th>Region Server</th><th>Start Key</th><th>End Key</th></tr>";
  HServerAddress rootLocation = master.getRegionManager().getRootRegionLocation();
  boolean showFragmentation = conf.getBoolean("hbase.master.ui.fragmentation.enabled", false);
  Map<String, Integer> frags = null;
  if (showFragmentation) {
      frags = master.getTableFragmentation();
  }

      out.write("\n\n<?xml version=\"1.0\" encoding=\"UTF-8\" ?>\n<!DOCTYPE html PUBLIC \"-//W3C//DTD XHTML 1.0 Transitional//EN\" \n  \"http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd\"> \n<html xmlns=\"http://www.w3.org/1999/xhtml\">\n\n");

  String action = request.getParameter("action");
  String key = request.getParameter("key");
  if ( action != null ) {

      out.write("\n<head><meta http-equiv=\"Content-Type\" content=\"text/html;charset=UTF-8\"/>\n<link rel=\"stylesheet\" type=\"text/css\" href=\"/static/hbase.css\" />\n</head>\n<body>\n<a id=\"logo\" href=\"http://wiki.apache.org/lucene-hadoop/Hbase\"><img src=\"/static/hbase_logo_med.gif\" alt=\"HBase Logo\" title=\"HBase Logo\" /></a>\n<h1 id=\"page_title\">Table action request accepted</h1>\n<p><hr><p>\n");

  if (action.equals("split")) {
    if (key != null && key.length() > 0) {
      Writable[] arr = new Writable[1];
      arr[0] = new ImmutableBytesWritable(Bytes.toBytes(key));
      master.modifyTable(Bytes.toBytes(tableName), HConstants.Modify.TABLE_SPLIT, arr);
    } else {
      master.modifyTable(Bytes.toBytes(tableName), HConstants.Modify.TABLE_SPLIT, null);
    }
   
      out.write(" Split request accepted. ");

  } else if (action.equals("compact")) {
    if (key != null && key.length() > 0) {
      Writable[] arr = new Writable[1];
      arr[0] = new ImmutableBytesWritable(Bytes.toBytes(key));
      master.modifyTable(Bytes.toBytes(tableName), HConstants.Modify.TABLE_COMPACT, arr);
    } else {
      master.modifyTable(Bytes.toBytes(tableName), HConstants.Modify.TABLE_COMPACT, null);
    }
   
      out.write(" Compact request accepted. ");

  }

      out.write("\n<p>Reload.\n</body>\n");

} else {

      out.write("\n<head><meta http-equiv=\"Content-Type\" content=\"text/html;charset=UTF-8\"/>\n<title>Table: ");
      out.print( tableName );
      out.write("</title>\n<link rel=\"stylesheet\" type=\"text/css\" href=\"/static/hbase.css\" />\n</head>\n<body>\n<a id=\"logo\" href=\"http://wiki.apache.org/lucene-hadoop/Hbase\"><img src=\"/static/hbase_logo_med.gif\" alt=\"HBase Logo\" title=\"HBase Logo\" /></a>\n<h1 id=\"page_title\">Table: ");
      out.print( tableName );
      out.write("</h1>\n<p id=\"links_menu\"><a href=\"/master.jsp\">Master</a>, <a href=\"/logs/\">Local logs</a>, <a href=\"/stacks\">Thread Dump</a>, <a href=\"/logLevel\">Log Level</a></p>\n<hr id=\"head_rule\" />\n");

  if(tableName.equals(Bytes.toString(HConstants.ROOT_TABLE_NAME))) {

      out.write('\n');
      out.print( tableHeader );
      out.write('\n');

  int infoPort = master.getServerManager().getHServerInfo(rootLocation).getInfoPort();
  String url = "http://" + rootLocation.getHostname() + ":" + infoPort + "/";

      out.write("\n<tr>\n  <td>");
      out.print( tableName );
      out.write("</td>\n  <td><a href=\"");
      out.print( url );
      out.write('"');
      out.write('>');
      out.print( rootLocation.getHostname() );
      out.write(':');
      out.print( rootLocation.getPort() );
      out.write("</a></td>\n  <td>-</td>\n  <td></td>\n  <td>-</td>\n</tr>\n</table>\n");

  } else if(tableName.equals(Bytes.toString(HConstants.META_TABLE_NAME))) {

      out.write('\n');
      out.print( tableHeader );
      out.write('\n');

  Map<byte [], MetaRegion> onlineRegions = master.getRegionManager().getOnlineMetaRegions();
  for (MetaRegion meta: onlineRegions.values()) {
    int infoPort = master.getServerManager().getHServerInfo(meta.getServer()).getInfoPort();
    String url = "http://" + meta.getServer().getHostname() + ":" + infoPort + "/";

      out.write("\n<tr>\n  <td>");
      out.print( Bytes.toString(meta.getRegionName()) );
      out.write("</td>\n    <td><a href=\"");
      out.print( url );
      out.write('"');
      out.write('>');
      out.print( meta.getServer().getHostname().toString() + ":" + infoPort );
      out.write("</a></td>\n    <td>-</td><td>");
      out.print( Bytes.toString(meta.getStartKey()) );
      out.write("</td><td>");
      out.print( Bytes.toString(meta.getEndKey()) );
      out.write("</td>\n</tr>\n");
  }
      out.write("\n</table>\n");
} else {
  try {
      out.write("\n<h2>Table Attributes</h2>\n<table>\n  <tr>\n      <th>Attribute Name</th>\n      <th>Value</th>\n      <th>Description</th></tr>\n  <tr>\n      <td>Enabled</td>\n      <td>");
      out.print( hbadmin.isTableEnabled(table.getTableName()) );
      out.write("</td>\n      <td>Is the table enabled</td>\n  </tr>\n");
  if (showFragmentation) {
      out.write("\n  <tr>\n      <td>Fragmentation</td>\n      <td>");
      out.print( frags.get(tableName) != null ? frags.get(tableName).intValue() + "%" : "n/a" );
      out.write("</td>\n      <td>How fragmented is the table. After a major compaction it is 0%.</td>\n  </tr>\n");
  }
      out.write("\n</table>\n");

  Map<HRegionInfo, HServerAddress> regions = table.getRegionsInfo();
  if(regions != null && regions.size() > 0) {
      out.write('\n');
      out.print(     tableHeader );
      out.write('\n');

  for(Map.Entry<HRegionInfo, HServerAddress> hriEntry : regions.entrySet()) {
    int infoPort = master.getServerManager().getHServerInfo(hriEntry.getValue()).getInfoPort();
    String urlRegionServer =
        "http://" + hriEntry.getValue().getHostname().toString() + ":" + infoPort + "/";

      out.write("\n<tr>\n  <td>");
      out.print( Bytes.toStringBinary(hriEntry.getKey().getRegionName()));
View Full Code Here

      session = pageContext.getSession();
      out = pageContext.getOut();
      _jspx_out = out;


  HMaster master = (HMaster)getServletContext().getAttribute(HMaster.MASTER);
  Configuration conf = master.getConfiguration();
  HBaseAdmin hbadmin = new HBaseAdmin(conf);
  HConnection connection = hbadmin.getConnection();
  ZooKeeperWrapper wrapper = connection.getZooKeeperWrapper();

      out.write("\n\n<?xml version=\"1.0\" encoding=\"UTF-8\" ?>\n<!DOCTYPE html PUBLIC \"-//W3C//DTD XHTML 1.0 Transitional//EN\" \n  \"http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd\"> \n<html xmlns=\"http://www.w3.org/1999/xhtml\">\n<head><meta http-equiv=\"Content-Type\" content=\"text/html;charset=UTF-8\"/>\n<title>ZooKeeper Dump</title>\n<link rel=\"stylesheet\" type=\"text/css\" href=\"/static/hbase.css\" />\n</head>\n<body>\n<a id=\"logo\" href=\"http://hbase.org\"><img src=\"/static/hbase_logo_med.gif\" alt=\"HBase Logo\" title=\"HBase Logo\" /></a>\n<h1 id=\"page_title\">ZooKeeper Dump</h1>\n<p id=\"links_menu\"><a href=\"/master.jsp\">Master</a>, <a href=\"/logs/\">Local logs</a>, <a href=\"/stacks\">Thread Dump</a>, <a href=\"/logLevel\">Log Level</a></p>\n<hr id=\"head_rule\" />\n<pre>\n");
View Full Code Here

      setupTable(table);
      assertEquals(ROWKEYS.length, countRows());

      // Mess it up by creating an overlap
      MiniHBaseCluster cluster = TEST_UTIL.getHBaseCluster();
      HMaster master = cluster.getMaster();
      HRegionInfo hriOverlap1 = createRegion(conf, tbl.getTableDescriptor(),
        Bytes.toBytes("A"), Bytes.toBytes("AB"));
      master.assignRegion(hriOverlap1);
      master.getAssignmentManager().waitForAssignment(hriOverlap1);
      HRegionInfo hriOverlap2 = createRegion(conf, tbl.getTableDescriptor(),
        Bytes.toBytes("AB"), Bytes.toBytes("B"));
      master.assignRegion(hriOverlap2);
      master.getAssignmentManager().waitForAssignment(hriOverlap2);

      HBaseFsck hbck = doFsck(conf, false);
      assertErrors(hbck, new ERROR_CODE[] {ERROR_CODE.DUPE_STARTKEYS,
        ERROR_CODE.DUPE_STARTKEYS, ERROR_CODE.OVERLAP_IN_REGION_CHAIN});
      assertEquals(3, hbck.getOverlapGroups(table).size());
View Full Code Here

  @Test (timeout=300000)
  public void testMoveToPreviouslyAssignedRS() throws IOException, InterruptedException {
    byte[] tableName = Bytes.toBytes("testMoveToPreviouslyAssignedRS");
    MiniHBaseCluster cluster = TEST_UTIL.getHBaseCluster();
    HMaster master = cluster.getMaster();
    HBaseAdmin localAdmin = createTable(tableName);
    List<HRegionInfo> tableRegions = localAdmin.getTableRegions(tableName);
    HRegionInfo hri = tableRegions.get(0);
    AssignmentManager am = master.getAssignmentManager();
    assertTrue("Region " + hri.getRegionNameAsString()
      + " should be assigned properly", am.waitForAssignment(hri));
    ServerName server = am.getRegionStates().getRegionServerOfRegion(hri);
    localAdmin.move(hri.getEncodedNameAsBytes(), Bytes.toBytes(server.getServerName()));
    assertEquals("Current region server and region server before move should be same.", server,
View Full Code Here

  }

  private void moveRegionAndWait(HRegion destRegion, HRegionServer destServer)
      throws InterruptedException, MasterNotRunningException,
      ZooKeeperConnectionException, IOException {
    HMaster master = TEST_UTIL.getMiniHBaseCluster().getMaster();
    TEST_UTIL.getHBaseAdmin().move(
        destRegion.getRegionInfo().getEncodedNameAsBytes(),
        Bytes.toBytes(destServer.getServerName().getServerName()));
    while (true) {
      ServerName serverName = master.getAssignmentManager()
        .getRegionStates().getRegionServerOfRegion(destRegion.getRegionInfo());
      if (serverName != null && serverName.equals(destServer.getServerName())) {
        TEST_UTIL.assertRegionOnServer(
          destRegion.getRegionInfo(), serverName, 200);
        break;
View Full Code Here

  /**
   * Expire the Master's session
   * @throws Exception
   */
  public void expireMasterSession() throws Exception {
    HMaster master = getMiniHBaseCluster().getMaster();
    expireSession(master.getZooKeeper(), false);
  }
View Full Code Here

   */
  @Test  // (timeout=30000)
  public void testDrainingServerOffloading()
  throws Exception {
    // I need master in the below.
    HMaster master = TEST_UTIL.getMiniHBaseCluster().getMaster();
    HRegionInfo hriToMoveBack = null;
    // Set first server as draining server.
    HRegionServer drainingServer =
      setDrainingServer(TEST_UTIL.getMiniHBaseCluster().getRegionServer(0));
    try {
      final int regionsOnDrainingServer =
        drainingServer.getNumberOfOnlineRegions();
      Assert.assertTrue(regionsOnDrainingServer > 0);
      List<HRegionInfo> hris = ProtobufUtil.getOnlineRegions(drainingServer);
      for (HRegionInfo hri : hris) {
        // Pass null and AssignmentManager will chose a random server BUT it
        // should exclude draining servers.
        master.moveRegion(null,
          RequestConverter.buildMoveRegionRequest(hri.getEncodedNameAsBytes(), null));
        // Save off region to move back.
        hriToMoveBack = hri;
      }
      // Wait for regions to come back on line again.
      waitForAllRegionsOnline();
      Assert.assertEquals(0, drainingServer.getNumberOfOnlineRegions());
    } finally {
      unsetDrainingServer(drainingServer);
    }
    // Now we've unset the draining server, we should be able to move a region
    // to what was the draining server.
    master.moveRegion(null,
      RequestConverter.buildMoveRegionRequest(hriToMoveBack.getEncodedNameAsBytes(),
      Bytes.toBytes(drainingServer.getServerName().toString())));
    // Wait for regions to come back on line again.
    waitForAllRegionsOnline();
    Assert.assertEquals(1, drainingServer.getNumberOfOnlineRegions());
View Full Code Here

TOP

Related Classes of org.apache.hadoop.hbase.master.HMaster

Copyright © 2018 www.massapi.com. All rights reserved.
All source code is the property of its respective owners. Java is a trademark of Sun Microsystems, Inc., now owned by Oracle Inc. Contact coftware@gmail.com.