Code examples of the datanodeReport() method


Examples of org.apache.hadoop.hdfs.DFSClient.datanodeReport()

      for (int i = 0; i < 10 && !gotHeartbeat; i++) {
        try {
          Thread.sleep(i*1000);
        } catch (InterruptedException ie) {}

        report = client.datanodeReport(DatanodeReportType.ALL);
        gotHeartbeat = (report[0].getLastUpdate() > firstUpdateAfterRestart);
      }
      if (!gotHeartbeat) {
        fail("Never got a heartbeat from restarted datanode.");
      }
View Full Code Here

Examples of org.apache.hadoop.hdfs.DFSClient.datanodeReport()

     
      // add a new datanode to the cluster
      cluster.startDataNodes(CONF, 1, true, null, NEW_RACKS);
      cluster.waitActive();
     
      DatanodeInfo[] datanodes = client.datanodeReport(DatanodeReportType.ALL);

      // find out the new node
      DatanodeInfo newNode=null;
      for(DatanodeInfo node:datanodes) {
        Boolean isNewNode = true;
View Full Code Here

Examples of org.apache.hadoop.hdfs.DFSClient.datanodeReport()

  @Test
  public void testDecommissionStatus() throws IOException, InterruptedException {
    InetSocketAddress addr = new InetSocketAddress("localhost", cluster
        .getNameNodePort());
    DFSClient client = new DFSClient(addr, conf);
    DatanodeInfo[] info = client.datanodeReport(DatanodeReportType.LIVE);
    assertEquals("Number of Datanodes ", 2, info.length);
    FileSystem fileSys = cluster.getFileSystem();

    short replicas = 2;
    //
View Full Code Here

Examples of org.apache.hadoop.hdfs.DFSClient.datanodeReport()

   */
  public int printTopology() throws IOException {
    if (fs instanceof DistributedFileSystem) {
      DistributedFileSystem dfs = (DistributedFileSystem)fs;
      DFSClient client = dfs.getClient();
      DatanodeInfo[] report = client.datanodeReport(DatanodeReportType.ALL);
     
      // Build a map of rack -> nodes from the datanode report
      HashMap<String, TreeSet<String> > tree = new HashMap<String, TreeSet<String>>();
      for(DatanodeInfo dni : report) {
        String location = dni.getNetworkLocation();
View Full Code Here

Examples of org.apache.hadoop.hdfs.DFSClient.datanodeReport()

     
      // add a new datanode to the cluster
      cluster.startDataNodes(CONF, 1, true, null, NEW_RACKS);
      cluster.waitActive();
     
      DatanodeInfo[] datanodes = client.datanodeReport(DatanodeReportType.ALL);

      // find out the new node
      DatanodeInfo newNode=null;
      for(DatanodeInfo node:datanodes) {
        Boolean isNewNode = true;
View Full Code Here

Examples of org.apache.hadoop.hdfs.DFSClient.datanodeReport()

  @Test
  public void testDecommissionStatus() throws IOException, InterruptedException {
    InetSocketAddress addr = new InetSocketAddress("localhost", cluster
        .getNameNodePort());
    DFSClient client = new DFSClient(addr, conf);
    DatanodeInfo[] info = client.datanodeReport(DatanodeReportType.LIVE);
    assertEquals("Number of Datanodes ", 2, info.length);
    FileSystem fileSys = cluster.getFileSystem();

    short replicas = 2;
    //
View Full Code Here

Examples of org.apache.hadoop.hdfs.DFSClient.datanodeReport()

     
      // add a fourth datanode to the cluster
      cluster.startDataNodes(CONF, 1, true, null, NEW_RACKS);
      cluster.waitActive();
     
      DatanodeInfo[] datanodes = client.datanodeReport(DatanodeReportType.ALL);

      // find out the new node
      DatanodeInfo newNode=null;
      for(DatanodeInfo node:datanodes) {
        Boolean isNewNode = true;
View Full Code Here

Examples of org.apache.hadoop.hdfs.DFSClient.datanodeReport()

    System.out.println("HDFS NameNode: " + conf.get("fs.default.name"));
    DFSClient dfsClient = null;
    try {
      dfsClient = new DFSClient(conf);
     
      DatanodeInfo[] liveNodes = dfsClient.datanodeReport(DatanodeReportType.LIVE);
      for (DatanodeInfo dni : liveNodes) {
        long dfsUsed = dni.getDfsUsed();
        long nonDfsUsed = dni.getNonDfsUsed();
        long capacity = dni.getCapacity();
        float capacityPercentage = ((float)(dfsUsed + nonDfsUsed) / (float)capacity) * 100.0f;
 
View Full Code Here

Examples of org.apache.hadoop.hdfs.DFSClient.datanodeReport()

        long capacity = dni.getCapacity();
        float capacityPercentage = ((float)(dfsUsed + nonDfsUsed) / (float)capacity) * 100.0f;
        System.out.println(String.format("%s DataNode - [ ALIVE ] - DFS Capacity: (%d + %d / %d) %.2f%%; xceivers: %d",
               new Object[] { dni.getHostName(), dfsUsed, nonDfsUsed, capacity, capacityPercentage, dni.getXceiverCount() }));
      }
      DatanodeInfo[] deadNodes = dfsClient.datanodeReport(DatanodeReportType.DEAD);
      if (deadNodes.length > 0) {
        retCode = 2;
        for (DatanodeInfo dni : deadNodes) {
          System.out.println(dni.getHostName() + " DataNode - [ DEAD ]");
        }
View Full Code Here

Examples of org.apache.hadoop.hdfs.DFSClient.datanodeReport()

     
      // add a fourth datanode to the cluster
      cluster.startDataNodes(CONF, 1, true, null, NEW_RACKS);
      cluster.waitActive();
     
      DatanodeInfo[] datanodes = client.datanodeReport(DatanodeReportType.ALL);

      // find out the new node
      DatanodeInfo newNode=null;
      for(DatanodeInfo node:datanodes) {
        Boolean isNewNode = true;
View Full Code Here
TOP
Copyright © 2018 www.massapi.com. All rights reserved.
All source code is the property of its respective owners. Java is a trademark of Sun Microsystems, Inc. and is owned by Oracle Inc. Contact coftware#gmail.com.