Examples of DatanodeManager

DatanodeManager (org.apache.hadoop.hdfs.server.blockmanagement.DatanodeManager) is the NameNode-side manager of datanode state: it resolves DatanodeID values to DatanodeDescriptor objects, builds the live/dead/decommissioning node lists used by reports and the web UI, and applies host include/exclude refreshes. Each excerpt below is a call site from the HDFS code base itself.

Examples of org.apache.hadoop.hdfs.server.blockmanagement.DatanodeManager

    // Update old block with the new generation stamp and new length
    blockinfo.setGenerationStamp(newBlock.getGenerationStamp());
    blockinfo.setNumBytes(newBlock.getNumBytes());

    // find the DatanodeDescriptor objects
    final DatanodeManager dm = getBlockManager().getDatanodeManager();
    DatanodeDescriptor[] descriptors = null;
    if (newNodes.length > 0) {
      descriptors = new DatanodeDescriptor[newNodes.length];
      for(int i = 0; i < newNodes.length; i++) {
        descriptors[i] = dm.getDatanode(newNodes[i]);
      }
    }
    blockinfo.setExpectedLocations(descriptors);

    // persist blocks only if append is supported
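
This excerpt matches the NameNode's updatePipeline handling: after a pipeline recovery, the block's generation stamp and length are bumped, then each DatanodeID reported by the client is resolved through DatanodeManager.getDatanode(...) into the NameNode's own DatanodeDescriptor so that setExpectedLocations can record where replicas of the under-construction block are now expected. Note that descriptors stays null when newNodes is empty.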

Examples of org.apache.hadoop.hdfs.server.blockmanagement.DatanodeManager

    }

    void generateHealthReport(JspWriter out, NameNode nn,
        HttpServletRequest request) throws IOException {
      FSNamesystem fsn = nn.getNamesystem();
      final DatanodeManager dm = fsn.getBlockManager().getDatanodeManager();
      final List<DatanodeDescriptor> live = new ArrayList<DatanodeDescriptor>();
      final List<DatanodeDescriptor> dead = new ArrayList<DatanodeDescriptor>();
      dm.fetchDatanodes(live, dead, true);

      int liveDecommissioned = 0;
      for (DatanodeDescriptor d : live) {
        liveDecommissioned += d.isDecommissioned() ? 1 : 0;
      }

      int deadDecommissioned = 0;
      for (DatanodeDescriptor d : dead) {
        deadDecommissioned += d.isDecommissioned() ? 1 : 0;
      }
     
      final List<DatanodeDescriptor> decommissioning = dm.getDecommissioningNodes();

      sorterField = request.getParameter("sorter/field");
      sorterOrder = request.getParameter("sorter/order");
      if (sorterField == null)
        sorterField = "name";
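A note on fetchDatanodes: it fills the two caller-supplied lists in one pass over the datanode map, and the boolean argument controls whether decommissioned nodes are filtered from the results (the exact filtering semantics vary across HDFS versions; check your DatanodeManager source). A minimal hedged sketch, assuming your version accepts null for a list you don't need:

      // Collect only the live nodes; the dead list is omitted.
      final List<DatanodeDescriptor> liveOnly =
          new ArrayList<DatanodeDescriptor>();
      dm.fetchDatanodes(liveOnly, null, false);
      System.out.println("live datanodes: " + liveOnly.size());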

Examples of org.apache.hadoop.hdfs.server.blockmanagement.DatanodeManager

    void generateNodesList(ServletContext context, JspWriter out,
        HttpServletRequest request) throws IOException {
      final NameNode nn = NameNodeHttpServer.getNameNodeFromContext(context);
      final FSNamesystem ns = nn.getNamesystem();
      final DatanodeManager dm = ns.getBlockManager().getDatanodeManager();

      final List<DatanodeDescriptor> live = new ArrayList<DatanodeDescriptor>();
      final List<DatanodeDescriptor> dead = new ArrayList<DatanodeDescriptor>();
      dm.fetchDatanodes(live, dead, true);

      InetSocketAddress nnSocketAddress =
          (InetSocketAddress)context.getAttribute(
              NameNodeHttpServer.NAMENODE_ADDRESS_ATTRIBUTE_KEY);
      String nnaddr = nnSocketAddress.getAddress().getHostAddress() + ":"
          + nnSocketAddress.getPort();

      whatNodes = request.getParameter("whatNodes"); // show only live or only
                                                     // dead nodes
      if (null == whatNodes || whatNodes.isEmpty()) {
        out.print("Invalid input");
        return;
      }
      sorterField = request.getParameter("sorter/field");
      sorterOrder = request.getParameter("sorter/order");
      if (sorterField == null)
        sorterField = "name";
      if (sorterOrder == null)
        sorterOrder = "ASC";

      JspHelper.sortNodeList(live, sorterField, sorterOrder);

      // Find out common suffix. Should this be before or after the sort?
      String port_suffix = null;
      if (live.size() > 0) {
        String name = live.get(0).getXferAddr();
        int idx = name.indexOf(':');
        if (idx > 0) {
          port_suffix = name.substring(idx);
        }

        for (int i = 1; port_suffix != null && i < live.size(); i++) {
          if (!live.get(i).getXferAddr().endsWith(port_suffix)) {
            port_suffix = null;
            break;
          }
        }
      }

      counterReset();

      try {
        Thread.sleep(1000);
      } catch (InterruptedException e) {
        // Preserve the interrupt status rather than swallowing it.
        Thread.currentThread().interrupt();
      }

      if (live.isEmpty() && dead.isEmpty()) {
        out.print("There are no datanodes in the cluster");
      } else {

        int nnHttpPort = nn.getHttpAddress().getPort();
        out.print("<div id=\"dfsnodetable\"> ");
        if (whatNodes.equals("LIVE")) {
          out.print("<a name=\"LiveNodes\" id=\"title\">" + "Live Datanodes : "
              + live.size() + "</a>"
              + "<br><br>\n<table class=\"nodes\">\n");

          counterReset();

          if (live.size() > 0) {
            if (live.get(0).getCapacity() > 1024 * diskBytes) {
              diskBytes *= 1024;
              diskByteStr = "TB";
            }

            out.print("<tr class=\"headerRow\"> <th " + nodeHeaderStr("name")
                + "> Node <th " + nodeHeaderStr("lastcontact")
                + "> Last <br>Contact <th " + nodeHeaderStr("adminstate")
                + "> Admin State <th " + nodeHeaderStr("capacity")
                + "> Configured <br>Capacity (" + diskByteStr + ") <th "
                + nodeHeaderStr("used") + "> Used <br>(" + diskByteStr
                + ") <th " + nodeHeaderStr("nondfsused")
                + "> Non DFS <br>Used (" + diskByteStr + ") <th "
                + nodeHeaderStr("remaining") + "> Remaining <br>("
                + diskByteStr + ") <th " + nodeHeaderStr("pcused")
                + "> Used <br>(%) <th " + nodeHeaderStr("pcused")
                + "> Used <br>(%) <th " + nodeHeaderStr("pcremaining")
                + "> Remaining <br>(%) <th " + nodeHeaderStr("blocks")
                + "> Blocks <th "
                + nodeHeaderStr("bpused") + "> Block Pool<br>Used ("
                + diskByteStr + ") <th "
                + nodeHeaderStr("pcbpused")
                + "> Block Pool<br>Used (%)"
                + "> Blocks <th " + nodeHeaderStr("volfails")
                +"> Failed Volumes\n");

            JspHelper.sortNodeList(live, sorterField, sorterOrder);
            for (int i = 0; i < live.size(); i++) {
              generateNodeData(out, live.get(i), port_suffix, true, nnHttpPort,
                  nnaddr);
            }
          }
          out.print("</table>\n");
        } else if (whatNodes.equals("DEAD")) {

          out.print("<br> <a name=\"DeadNodes\" id=\"title\"> "
              + " Dead Datanodes : " + dead.size() + "</a><br><br>\n");

          if (dead.size() > 0) {
            out.print("<table border=1 cellspacing=0> <tr id=\"row1\"> "
                + "<th " + nodeHeaderStr("node")
                + "> Node <th " + nodeHeaderStr("decommissioned")
                + "> Decommissioned\n");

            JspHelper.sortNodeList(dead, sorterField, sorterOrder);
            for (int i = 0; i < dead.size(); i++) {
              generateNodeData(out, dead.get(i), port_suffix, false,
                  nnHttpPort, nnaddr);
            }

            out.print("</table>\n");
          }
        } else if (whatNodes.equals("DECOMMISSIONING")) {
          // Decommissioning Nodes
          final List<DatanodeDescriptor> decommissioning = dm.getDecommissioningNodes();
          out.print("<br> <a name=\"DecommissioningNodes\" id=\"title\"> "
              + " Decommissioning Datanodes : " + decommissioning.size()
              + "</a><br><br>\n");
          if (decommissioning.size() > 0) {
            out.print("<table border=1 cellspacing=0> <tr class=\"headRow\"> "
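The whatNodes request parameter selects which table this helper renders (LIVE, DEAD, or DECOMMISSIONING). In this era of HDFS the NameNode front page links here as dfsnodelist.jsp?whatNodes=LIVE, optionally adding sorter/field and sorter/order, which the code above defaults to name and ASC when absent.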

Examples of org.apache.hadoop.hdfs.server.blockmanagement.DatanodeManager

  DatanodeInfo[] datanodeReport(final DatanodeReportType type
      ) throws AccessControlException {
    checkSuperuserPrivilege();
    readLock();
    try {
      final DatanodeManager dm = getBlockManager().getDatanodeManager();     
      final List<DatanodeDescriptor> results = dm.getDatanodeListForReport(type);

      DatanodeInfo[] arr = new DatanodeInfo[results.size()];
      for (int i=0; i<arr.length; i++) {
        arr[i] = new DatanodeInfo(results.get(i));
      }
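The excerpt is cut off before the lock is released. For context, a hedged sketch of the full shape of such an FSNamesystem read operation, pairing readLock() with readUnlock() in a finally block (the body is the excerpt's own logic; only the finally is added here):

  DatanodeInfo[] datanodeReport(final DatanodeReportType type
      ) throws AccessControlException {
    checkSuperuserPrivilege();
    readLock();
    try {
      final DatanodeManager dm = getBlockManager().getDatanodeManager();
      final List<DatanodeDescriptor> results = dm.getDatanodeListForReport(type);
      DatanodeInfo[] arr = new DatanodeInfo[results.size()];
      for (int i = 0; i < arr.length; i++) {
        // DatanodeDescriptor extends DatanodeInfo; the copy constructor
        // hands back a view without NameNode-internal state.
        arr[i] = new DatanodeInfo(results.get(i));
      }
      return arr;
    } finally {
      readUnlock();
    }
  }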

Examples of org.apache.hadoop.hdfs.server.blockmanagement.DatanodeManager

    // Update old block with the new generation stamp and new length
    blockinfo.setGenerationStamp(newBlock.getGenerationStamp());
    blockinfo.setNumBytes(newBlock.getNumBytes());

    // find the DatanodeDescriptor objects
    final DatanodeManager dm = getBlockManager().getDatanodeManager();
    DatanodeDescriptor[] descriptors = null;
    if (newNodes.length > 0) {
      descriptors = new DatanodeDescriptor[newNodes.length];
      for(int i = 0; i < newNodes.length; i++) {
        descriptors[i] = dm.getDatanode(newNodes[i]);
      }
    }
    blockinfo.setExpectedLocations(descriptors);

    String src = leaseManager.findPath(pendingFile);

Examples of org.apache.hadoop.hdfs.server.blockmanagement.DatanodeManager

    void generateNodesList(ServletContext context, JspWriter out,
        HttpServletRequest request) throws IOException {
      final NameNode nn = NameNodeHttpServer.getNameNodeFromContext(context);
      final FSNamesystem ns = nn.getNamesystem();
      final DatanodeManager dm = ns.getBlockManager().getDatanodeManager();

      final List<DatanodeDescriptor> live = new ArrayList<DatanodeDescriptor>();
      final List<DatanodeDescriptor> dead = new ArrayList<DatanodeDescriptor>();
      dm.fetchDatanodes(live, dead, true);

      InetSocketAddress nnSocketAddress = (InetSocketAddress) context
          .getAttribute(NameNodeHttpServer.NAMENODE_ADDRESS_ATTRIBUTE_KEY);
      String nnaddr = nnSocketAddress.getAddress().getHostAddress() + ":"
          + nnSocketAddress.getPort();

      whatNodes = request.getParameter("whatNodes"); // show only live or only
                                                     // dead nodes
      if (null == whatNodes || whatNodes.isEmpty()) {
        out.print("Invalid input");
        return;
      }
      sorterField = request.getParameter("sorter/field");
      sorterOrder = request.getParameter("sorter/order");
      if (sorterField == null)
        sorterField = "name";
      if (sorterOrder == null)
        sorterOrder = "ASC";

      JspHelper.sortNodeList(live, sorterField, sorterOrder);

      // Find out common suffix. Should this be before or after the sort?
      String port_suffix = null;
      if (live.size() > 0) {
        String name = live.get(0).getName();
        int idx = name.indexOf(':');
        if (idx > 0) {
          port_suffix = name.substring(idx);
        }

        for (int i = 1; port_suffix != null && i < live.size(); i++) {
          if (!live.get(i).getName().endsWith(port_suffix)) {
            port_suffix = null;
            break;
          }
        }
      }

      counterReset();

      try {
        Thread.sleep(1000);
      } catch (InterruptedException e) {
        // Preserve the interrupt status rather than swallowing it.
        Thread.currentThread().interrupt();
      }

      if (live.isEmpty() && dead.isEmpty()) {
        out.print("There are no datanodes in the cluster");
      } else {

        int nnHttpPort = nn.getHttpAddress().getPort();
        out.print("<div id=\"dfsnodetable\"> ");
        if (whatNodes.equals("LIVE")) {
          out.print("<a name=\"LiveNodes\" id=\"title\">" + "Live Datanodes : "
              + live.size() + "</a>"
              + "<br><br>\n<table border=1 cellspacing=0>\n");

          counterReset();

          if (live.size() > 0) {
            if (live.get(0).getCapacity() > 1024 * diskBytes) {
              diskBytes *= 1024;
              diskByteStr = "TB";
            }

            out.print("<tr class=\"headerRow\"> <th " + nodeHeaderStr("name")
                + "> Node <th " + nodeHeaderStr("lastcontact")
                + "> Last <br>Contact <th " + nodeHeaderStr("adminstate")
                + "> Admin State <th " + nodeHeaderStr("capacity")
                + "> Configured <br>Capacity (" + diskByteStr + ") <th "
                + nodeHeaderStr("used") + "> Used <br>(" + diskByteStr
                + ") <th " + nodeHeaderStr("nondfsused")
                + "> Non DFS <br>Used (" + diskByteStr + ") <th "
                + nodeHeaderStr("remaining") + "> Remaining <br>("
                + diskByteStr + ") <th " + nodeHeaderStr("pcused")
                + "> Used <br>(%) <th " + nodeHeaderStr("pcused")
                + "> Used <br>(%) <th " + nodeHeaderStr("pcremaining")
                + "> Remaining <br>(%) <th " + nodeHeaderStr("blocks")
                + "> Blocks <th "
                + nodeHeaderStr("bpused") + "> Block Pool<br>Used ("
                + diskByteStr + ") <th "
                + nodeHeaderStr("pcbpused")
                + "> Block Pool<br>Used (%)"
                + "> Blocks <th " + nodeHeaderStr("volfails")
                +"> Failed Volumes\n");

            JspHelper.sortNodeList(live, sorterField, sorterOrder);
            for (int i = 0; i < live.size(); i++) {
              generateNodeData(out, live.get(i), port_suffix, true, nnHttpPort,
                  nnaddr);
            }
          }
          out.print("</table>\n");
        } else if (whatNodes.equals("DEAD")) {

          out.print("<br> <a name=\"DeadNodes\" id=\"title\"> "
              + " Dead Datanodes : " + dead.size() + "</a><br><br>\n");

          if (dead.size() > 0) {
            out.print("<table border=1 cellspacing=0> <tr id=\"row1\"> "
                + "<th " + nodeHeaderStr("node")
                + "> Node <th " + nodeHeaderStr("decommissioned")
                + "> Decommissioned\n");

            JspHelper.sortNodeList(dead, sorterField, sorterOrder);
            for (int i = 0; i < dead.size(); i++) {
              generateNodeData(out, dead.get(i), port_suffix, false,
                  nnHttpPort, nnaddr);
            }

            out.print("</table>\n");
          }
        } else if (whatNodes.equals("DECOMMISSIONING")) {
          // Decommissioning Nodes
          final List<DatanodeDescriptor> decommissioning = dm.getDecommissioningNodes();
          out.print("<br> <a name=\"DecommissioningNodes\" id=\"title\"> "
              + " Decommissioning Datanodes : " + decommissioning.size()
              + "</a><br><br>\n");
          if (decommissioning.size() > 0) {
            out.print("<table border=1 cellspacing=0> <tr class=\"headRow\"> "

Examples of org.apache.hadoop.hdfs.server.blockmanagement.DatanodeManager

      cluster.waitActive();
     
      // 'sm' is a custom SecurityManager, installed earlier in the test
      // (outside this excerpt), that counts host-name lookups.
      int initialLookups = sm.lookups;
      assertTrue("dns security manager is active", initialLookups != 0);
     
      DatanodeManager dm =
          cluster.getNamesystem().getBlockManager().getDatanodeManager();
     
      // make sure no lookups occur
      dm.refreshNodes(conf);
      assertEquals(initialLookups, sm.lookups);

      dm.refreshNodes(conf);
      assertEquals(initialLookups, sm.lookups);
     
      // ensure none of the reports trigger lookups
      dm.getDatanodeListForReport(DatanodeReportType.ALL);
      assertEquals(initialLookups, sm.lookups);
     
      dm.getDatanodeListForReport(DatanodeReportType.LIVE);
      assertEquals(initialLookups, sm.lookups);
     
      dm.getDatanodeListForReport(DatanodeReportType.DEAD);
      assertEquals(initialLookups, sm.lookups);
    } finally {
      if (cluster != null) {
        cluster.shutdown();
      }
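Since every report flavor is exercised above, here is a compact hedged sketch of the same calls in a loop. DatanodeReportType (ALL, LIVE, DEAD) lives in org.apache.hadoop.hdfs.protocol.HdfsConstants in this era of HDFS; later versions add more values:

      for (DatanodeReportType type : new DatanodeReportType[] {
          DatanodeReportType.ALL, DatanodeReportType.LIVE,
          DatanodeReportType.DEAD }) {
        // Each call runs the same host-list matching that the test
        // guards against triggering DNS lookups.
        System.out.println(type + ": "
            + dm.getDatanodeListForReport(type).size() + " node(s)");
      }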

Examples of org.apache.hadoop.hdfs.server.blockmanagement.DatanodeManager

    // Bring up two additional datanodes that need both of their volumes
    // functioning in order to stay up.
    conf.setInt(DFSConfigKeys.DFS_DATANODE_FAILED_VOLUMES_TOLERATED_KEY, 0);
    cluster.startDataNodes(conf, 2, true, null, null);
    cluster.waitActive();
    final DatanodeManager dm =
        cluster.getNamesystem().getBlockManager().getDatanodeManager();
    long origCapacity = DFSTestUtil.getLiveDatanodeCapacity(dm);
    long dnCapacity = DFSTestUtil.getDatanodeCapacity(dm, 0);

    // Fail a volume on the 2nd DN
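The excerpt stops just before the volume failure is injected. A hedged sketch of the check this setup builds toward (illustrative only; the real test uses DFSTestUtil helpers and retries rather than one immediate assertion):

    // Assumption: with zero tolerated volume failures, a DN whose volume
    // fails drops out, so the live capacity reported through the
    // DatanodeManager falls below the original total.
    long newCapacity = DFSTestUtil.getLiveDatanodeCapacity(dm);
    assertTrue("capacity should drop after the volume failure",
        newCapacity < origCapacity);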

Examples of org.apache.hadoop.hdfs.server.blockmanagement.DatanodeManager

   *  a failed volume by the NN.
   */
  @Test
  public void testFailedVolumeOnStartupIsCounted() throws Exception {
    assumeTrue(!System.getProperty("os.name").startsWith("Windows"));
    final DatanodeManager dm =
        cluster.getNamesystem().getBlockManager().getDatanodeManager();
    long origCapacity = DFSTestUtil.getLiveDatanodeCapacity(dm);
    File dir = new File(cluster.getInstanceStorageDir(0, 0), "current");

    try {
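The excerpt ends at the try block, but the test name and the surviving comment fragment state the intent: a volume that is already bad when the DataNode starts (here, the first storage directory's current dir, which the test presumably makes unusable) must still appear in the NameNode's failed-volume and capacity accounting.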