Package org.apache.hadoop.net

Examples of org.apache.hadoop.net.NetworkTopology$InnerNode
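Before the collected snippets, here is a minimal, self-contained sketch of how the topology tree is built and queried. It is not taken from any example below; it assumes only the long-standing public NetworkTopology API (add, getNumOfRacks, getNumOfLeaves, getDistance), and the class name TopologyDemo is purely illustrative. Rack paths such as /rack1 become InnerNode entries inside the tree, while hosts are the leaves.

    import org.apache.hadoop.net.NetworkTopology;
    import org.apache.hadoop.net.Node;
    import org.apache.hadoop.net.NodeBase;

    public class TopologyDemo {
      public static void main(String[] args) {
        NetworkTopology topology = new NetworkTopology();

        // Each distinct rack path creates an InnerNode; hosts are leaf nodes.
        Node h1 = new NodeBase("host1", "/rack1");
        Node h2 = new NodeBase("host2", "/rack1");
        Node h3 = new NodeBase("host3", "/rack2");
        topology.add(h1);
        topology.add(h2);
        topology.add(h3);

        System.out.println("racks  = " + topology.getNumOfRacks());  // 2
        System.out.println("leaves = " + topology.getNumOfLeaves()); // 3

        // Distance: 0 = same node, 2 = same rack, 4 = different racks.
        System.out.println(topology.getDistance(h1, h2));            // 2
        System.out.println(topology.getDistance(h1, h3));            // 4
      }
    }

The snippets that follow show how the same class is used inside the Hadoop code base itself.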


    return goodBlock;
  }
 
  /* Reset all the balancer's fields, preparing for the next iteration. */
  private void resetData() {
    this.cluster = new NetworkTopology();
    this.overUtilizedDatanodes.clear();
    this.aboveAvgUtilizedDatanodes.clear();
    this.belowAvgUtilizedDatanodes.clear();
    this.underUtilizedDatanodes.clear();
    this.datanodes.clear();


    long minSize = Math.max(job.getLong("mapred.min.split.size", 1),
                            minSplitSize);

    // generate splits; clusterMap is consulted later to map block hosts to
    // racks when the preferred locations for each split are chosen
    ArrayList<FileSplit> splits = new ArrayList<FileSplit>(numSplits);
    NetworkTopology clusterMap = new NetworkTopology();
    for (FileStatus file: files) {
      Path path = file.getPath();
      FileSystem fs = path.getFileSystem(job);
      long length = file.getLen();
      BlockLocation[] blkLocations = fs.getFileBlockLocations(file, 0, length);
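The remainder of getSplits() (not shown in this fragment) walks each file's block locations and carves it into FileSplits, using clusterMap to pick rack-aware split hosts. The split length itself comes from a simple rule; below is a standalone sketch of that rule with made-up sizes, assuming the classic max(minSize, min(goalSize, blockSize)) formula of the old mapred FileInputFormat (the class name SplitSizeDemo is illustrative).

    public class SplitSizeDemo {
      // Classic rule: a split is at most one block long, never smaller than
      // minSize, and otherwise aims for goalSize = totalLength / numSplits.
      static long computeSplitSize(long goalSize, long minSize, long blockSize) {
        return Math.max(minSize, Math.min(goalSize, blockSize));
      }

      public static void main(String[] args) {
        long blockSize = 128L * 1024 * 1024;   // hypothetical 128 MB block size
        long totalLength = 10 * blockSize;     // hypothetical ten-block file
        long goalSize = totalLength / 4;       // the caller hinted at 4 splits
        // One block per split wins here: prints 134217728
        System.out.println(computeSplitSize(goalSize, 1, blockSize));
      }
    }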

      long fileLen = blockSize * NUM_BLOCKS;
      DFSTestUtil.createFile(dfs, path, fileLen, REPL_FACTOR, 1);
     
      // The file above is under-replicated; gather what fsck needs to inspect it
      NameNode namenode = cluster.getNameNode();
      NetworkTopology nettop = cluster.getNamesystem().getBlockManager()
          .getDatanodeManager().getNetworkTopology();
      Map<String,String[]> pmap = new HashMap<String, String[]>();
      Writer result = new StringWriter();
      PrintWriter out = new PrintWriter(result, true);
      InetAddress remoteAddress = InetAddress.getLocalHost();

      long fileLen = blockSize * NUM_BLOCKS;
      DFSTestUtil.createFile(dfs, path, fileLen, REPL_FACTOR, 1);
     
      // Fetch the NameNode and the namesystem's network topology
      NameNode namenode = cluster.getNameNode();
      NetworkTopology nettop = cluster.getNamesystem().getBlockManager()
          .getDatanodeManager().getNetworkTopology();
      // Add a new node on different rack, so previous blocks' replicas
      // are considered to be misplaced
      nettop.add(DFSTestUtil.getDatanodeDescriptor("/rack2", "/host3"));
      NUM_DN++;
     
      Map<String,String[]> pmap = new HashMap<String, String[]>();
      Writer result = new StringWriter();
      PrintWriter out = new PrintWriter(result, true);

    // Number of replicas to actually start
    final short NUM_REPLICAS = 1;

    // Mock the NameNode-side collaborators so the fsck code can be driven
    // directly in the test, without a running cluster
    Configuration conf = new Configuration();
    NameNode namenode = mock(NameNode.class);
    NetworkTopology nettop = mock(NetworkTopology.class);
    Map<String,String[]> pmap = new HashMap<String, String[]>();
    Writer result = new StringWriter();
    PrintWriter out = new PrintWriter(result, true);
    InetAddress remoteAddress = InetAddress.getLocalHost();
    FSNamesystem fsName = mock(FSNamesystem.class);
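Mocks like these are normally given canned answers before the fsck code is exercised. Below is a hedged sketch of that stubbing, assuming Mockito and the standard NetworkTopology getters; the values and the class name FsckMockSketch are made up for illustration.

    import static org.mockito.Mockito.mock;
    import static org.mockito.Mockito.when;

    import org.apache.hadoop.net.NetworkTopology;

    public class FsckMockSketch {
      public static void main(String[] args) {
        NetworkTopology nettop = mock(NetworkTopology.class);

        // Canned topology answers; a real test would hand this mock to fsck.
        when(nettop.getNumOfRacks()).thenReturn(1);
        when(nettop.getNumOfLeaves()).thenReturn(3);

        System.out.println(nettop.getNumOfRacks());   // 1
        System.out.println(nettop.getNumOfLeaves());  // 3
      }
    }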

      // On leaving safe mode, the NameNode logs a summary of the topology
      // and of the under-replicated block queue
      if (reached >= 0) {
        NameNode.stateChangeLog.info("STATE* Safe mode is OFF");
      }
      reached = -1;
      safeMode = null;
      final NetworkTopology nt = blockManager.getDatanodeManager().getNetworkTopology();
      NameNode.stateChangeLog.info("STATE* Network topology has "
          + nt.getNumOfRacks() + " racks and "
          + nt.getNumOfLeaves() + " datanodes");
      NameNode.stateChangeLog.info("STATE* UnderReplicatedBlocks has "
          + blockManager.numOfUnderReplicatedBlocks() + " blocks");

      startSecretManagerIfNecessary();





  /**
   * FSNamesystem constructor.
   */
  FSNamesystem(NameNode nn, Configuration conf) throws IOException {
    super(conf);
    try {
      clusterMap = new NetworkTopology(conf);
      initialize(nn, getConf());
      pathValidator = new PathValidator(conf);
    } catch (IOException e) {
      LOG.error(getClass().getSimpleName() + " initialization failed.", e);
      close();
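This constructor hands the Configuration straight to new NetworkTopology(conf). In current Apache Hadoop releases the topology is more commonly obtained through the NetworkTopology.getInstance(conf) factory, which lets the concrete implementation be swapped via the net.topology.impl key. A hedged sketch of that pattern follows (the factory exists in Hadoop 2.x and later; verify against your version, and the class name TopologyFactorySketch is illustrative).

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.net.NetworkTopology;

    public class TopologyFactorySketch {
      public static void main(String[] args) {
        Configuration conf = new Configuration();

        // getInstance(conf) reflects the class named by net.topology.impl,
        // falling back to the default NetworkTopology implementation.
        NetworkTopology clusterMap = NetworkTopology.getInstance(conf);

        System.out.println(clusterMap.getNumOfRacks()); // 0 for an empty map
      }
    }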


