Package org.apache.hadoop.hdfs.server.datanode

Examples of org.apache.hadoop.hdfs.server.datanode.DataNode


      final long size = lb.getBlockSize();
      if (i < numblock - 1) {
        assertEquals(BLOCK_SIZE, size);
      }
      for(DatanodeInfo datanodeinfo : lb.getLocations()) {
        final DataNode dn = cluster.getDataNode(datanodeinfo.getIpcPort());
        final BlockMetaDataInfo metainfo = dn.getBlockMetaDataInfo(blk);
        assertEquals(size, metainfo.getNumBytes());
      }
    }
  }
View Full Code Here
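
The fragment above assumes a LocatedBlock lb and a Block blk taken from a file's block list. A minimal sketch of how such a list might be obtained in the same kind of test (the file path is illustrative, not from the original source):

      // Sketch: ask the NameNode for the located blocks of a test file, then
      // walk each block and its replicas as the fragment above does.
      NameNode namenode = cluster.getNameNode();
      LocatedBlocks locations = namenode.getBlockLocations("/testfile", 0, Long.MAX_VALUE);
      for (LocatedBlock lb : locations.getLocatedBlocks()) {
        Block blk = lb.getBlock();
        // ... per-block and per-replica assertions as shown above ...
      }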


      }
      Configuration newconf = new Configuration(dnConf); // save config
      if (hosts != null) {
        NetUtils.addStaticResolution(hosts[i - curDatanodesNum], "localhost");
      }
      DataNode dn = DataNode.instantiateDataNode(dnArgs, dnConf);
      //since the HDFS does things based on IP:port, we need to add the mapping
      //for IP:port to rackId
      String ipAddr = dn.getSelfAddr().getAddress().getHostAddress();
      if (racks != null) {
        int port = dn.getSelfAddr().getPort();
        System.out.println("Adding node with IP:port : " + ipAddr + ":" + port+
                            " to rack " + racks[i-curDatanodesNum]);
        StaticMapping.addNodeToRack(ipAddr + ":" + port,
                                  racks[i-curDatanodesNum]);
      }
View Full Code Here

   * Gets a list of the started DataNodes.  May be empty.
   */
  public ArrayList<DataNode> getDataNodes() {
    ArrayList<DataNode> list = new ArrayList<DataNode>();
    for (int i = 0; i < dataNodes.size(); i++) {
      DataNode node = dataNodes.get(i).datanode;
      list.add(node);
    }
    return list;
  }
View Full Code Here
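
A short usage sketch for getDataNodes(), assuming the pre-0.21 style MiniDFSCluster constructor; the cluster size and configuration are illustrative:

  Configuration conf = new Configuration();
  MiniDFSCluster cluster = new MiniDFSCluster(conf, 3, true, null);
  try {
    // Every started DataNode is reachable through the returned list.
    for (DataNode dn : cluster.getDataNodes()) {
      System.out.println("Started DataNode at " + dn.getSelfAddr());
    }
  } finally {
    cluster.shutdown();
  }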

   * is left running so that new DataNodes may be started.
   */
  public void shutdownDataNodes() {
    for (int i = dataNodes.size()-1; i >= 0; i--) {
      System.out.println("Shutting down DataNode " + i);
      DataNode dn = dataNodes.remove(i).datanode;
      dn.shutdown();
      numDataNodes--;
    }
  }
View Full Code Here
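
The Javadoc above notes that the NameNode is left running, so new DataNodes can be started afterwards. A hedged sketch of that pattern (the startDataNodes overload and arguments shown here are illustrative):

    // Sketch: stop every DataNode, then bring up a fresh set against the
    // still-running NameNode.
    cluster.shutdownDataNodes();
    cluster.startDataNodes(conf, 2, true, null, null);
    cluster.waitActive();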

  DataNodeProperties stopDataNode(int i) {
    if (i < 0 || i >= dataNodes.size()) {
      return null;
    }
    DataNodeProperties dnprop = dataNodes.remove(i);
    DataNode dn = dnprop.datanode;
    System.out.println("MiniDFSCluster Stopping DataNode " +
                       dn.dnRegistration.getName() +
                       " from a total of " + (dataNodes.size() + 1) +
                       " datanodes.");
    dn.shutdown();
    numDataNodes--;
    return dnprop;
  }
View Full Code Here

   * Shutdown a datanode by name.
   */
  public synchronized DataNodeProperties stopDataNode(String name) {
    int i;
    for (i = 0; i < dataNodes.size(); i++) {
      DataNode dn = dataNodes.get(i).datanode;
      if (dn.dnRegistration.getName().equals(name)) {
        break;
      }
    }
    return stopDataNode(i);
View Full Code Here
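
A brief sketch of stopping a DataNode by its registration name, assuming a running MiniDFSCluster:

    // Sketch: look up the first DataNode's registration name and stop it by name.
    DataNode first = cluster.getDataNodes().get(0);
    String name = first.dnRegistration.getName();  // typically "host:port"
    cluster.stopDataNode(name);                    // returns null if no such DataNode was found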

    nni.updateAvatarConf(conf);
    startAvatarNode(nni, null);

    // Refresh datanodes with the newly started namenode
    for (DataNodeProperties dn : dataNodes) {
      DataNode datanode = dn.datanode;
      datanode.refreshNamenodes(conf);
    }
    // Wait for new namenode to get registrations from all the datanodes
    waitDataNodesActive(nnIndex);
    return nni;
  }
View Full Code Here

  @Test
  public void testRefreshNamenodes() throws Exception {
    MiniAvatarCluster cluster = null;
    try {
      cluster = new MiniAvatarCluster(conf, 1, true, null, null, 1, true);
      DataNode dn = cluster.getDataNodes().get(0);
      assertEquals(dn.getAllNamespaceServices().length, 1);
     
      cluster.addNameNode(conf);
      assertEquals(dn.getAllNamespaceServices().length, 2);
     
      cluster.addNameNode(conf);
      assertEquals(dn.getAllNamespaceServices().length, 3);
     
      cluster.addNameNode(conf);
      assertEquals(dn.getAllNamespaceServices().length, 4);
      int[] nns = null;
      nns = new int[]{0, 1, 2, 3};
      compareAddress(cluster, dn, nns);
      nns = new int[]{0, 1};
      Configuration conf1 = new Configuration(conf);
      setupAddress(conf1, new int[]{0, 1});
      dn.refreshNamenodes(conf1);
      waitDataNodeInitialized(dn);
      compareAddress(cluster, dn, nns);
     
      nns = new int[]{0,2,3};
      Configuration conf2 = new Configuration(conf);
      setupAddress(conf2, new int[]{0,2,3});
      dn.refreshNamenodes(conf2);
      waitDataNodeInitialized(dn);
      compareAddress(cluster, dn, nns);
    } finally {
      if (cluster != null) {
        cluster.shutDown();
      }
    }
View Full Code Here

      }
      Configuration newconf = new Configuration(dnConf); // save config
      if (hosts != null) {
        NetUtils.addStaticResolution(hosts[i - curDatanodesNum], "localhost");
      }
      DataNode dn = DataNode.instantiateDataNode(dnArgs, dnConf);
      if (dn == null) {
        throw new IOException("Cannot start DataNode in " +
            conf.get("dfs.data.dir"));
      }
      //since the HDFS does things based on IP:port, we need to add the mapping
      //for IP:port to rackId
      String ipAddr = dn.getSelfAddr().getAddress().getHostAddress();
      if (racks != null) {
        int port = dn.getSelfAddr().getPort();
        System.out.println("Adding node with IP:port : " + ipAddr + ":" + port+
                            " to rack " + racks[i-curDatanodesNum]);
        StaticMapping.addNodeToRack(ipAddr + ":" + port,
                                  racks[i-curDatanodesNum]);
      }
      dn.runDatanodeDaemon();
      waitDataNodeInitialized(dn);
      dataNodes.add(new DataNodeProperties(dn, newconf, dnArgs));
    }
    curDatanodesNum += numDataNodes;
    this.numDataNodes += numDataNodes;
View Full Code Here

