Examples of org.apache.hadoop.net.NetworkTopology
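
org.apache.hadoop.net.NetworkTopology represents a Hadoop cluster as a tree of racks and hosts, and it is what HDFS and MapReduce consult for rack-aware decisions: fsck reporting, balancing, safe-mode status logging, and input-split locality, as the excerpts below show. As a quick orientation, here is a minimal standalone sketch of the core API; the rack and host names are invented for illustration.

import org.apache.hadoop.net.NetworkTopology;
import org.apache.hadoop.net.Node;
import org.apache.hadoop.net.NodeBase;

public class NetworkTopologySketch {
  public static void main(String[] args) {
    NetworkTopology cluster = new NetworkTopology();

    // Leaves are added under a "/rack/host" path; NodeBase is the simplest Node.
    Node h1 = new NodeBase("/rack1/host1");
    Node h2 = new NodeBase("/rack1/host2");
    Node h3 = new NodeBase("/rack2/host3");
    cluster.add(h1);
    cluster.add(h2);
    cluster.add(h3);

    // In a two-level (/rack/host) tree the distance is 0 for the same node,
    // 2 for two hosts on the same rack, and 4 for hosts on different racks.
    System.out.println(cluster.getDistance(h1, h2));   // 2
    System.out.println(cluster.getDistance(h1, h3));   // 4
    System.out.println(cluster.isOnSameRack(h1, h2));  // true
  }
}

In the Hadoop code itself the Node instances are usually DatanodeInfo/DatanodeDescriptor objects rather than NodeBase, but the topology operations are the same.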


      long fileLen = blockSize * NUM_BLOCKS;
      DFSTestUtil.createFile(dfs, path, fileLen, REPL_FACTOR, 1);
     
      // Gather the inputs that an fsck run needs: namenode, network topology, output writer
      NameNode namenode = cluster.getNameNode();
      NetworkTopology nettop = cluster.getNamesystem().getBlockManager()
          .getDatanodeManager().getNetworkTopology();
      Map<String,String[]> pmap = new HashMap<String, String[]>();
      Writer result = new StringWriter();
      PrintWriter out = new PrintWriter(result, true);
      InetAddress remoteAddress = InetAddress.getLocalHost();
View Full Code Here
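
The excerpt above, from an fsck test running against a MiniDFSCluster, pulls the NameNode's live NetworkTopology out of the BlockManager's DatanodeManager and collects the other inputs (path map, output writer, remote address) that an fsck run needs. Once a NetworkTopology reference is in hand it can be queried directly; the following is a small standalone sketch with made-up rack paths, not part of the test above.

import java.util.Collections;

import org.apache.hadoop.net.NetworkTopology;
import org.apache.hadoop.net.Node;
import org.apache.hadoop.net.NodeBase;

public class TopologyQuerySketch {
  public static void main(String[] args) {
    NetworkTopology nettop = new NetworkTopology();
    nettop.add(new NodeBase("/rack1/dn1"));
    nettop.add(new NodeBase("/rack1/dn2"));
    nettop.add(new NodeBase("/rack2/dn3"));

    // Pick a random datanode from the whole cluster, then one from /rack2 only.
    Node any = nettop.chooseRandom(NodeBase.ROOT);
    Node onRack2 = nettop.chooseRandom("/rack2");
    System.out.println("any=" + any.getName() + " rack2=" + onRack2.getName());

    // Count the leaves in a scope, optionally excluding some nodes.
    int inRack1 = nettop.countNumOfAvailableNodes("/rack1",
        Collections.<Node>emptyList());
    System.out.println("nodes under /rack1: " + inRack1);  // 2
  }
}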


    return goodBlock;
  }
 
  /* Reset all fields of the balancer in preparation for the next iteration */
  private void resetData() {
    this.cluster = new NetworkTopology();
    this.overUtilizedDatanodes.clear();
    this.aboveAvgUtilizedDatanodes.clear();
    this.belowAvgUtilizedDatanodes.clear();
    this.underUtilizedDatanodes.clear();
    this.datanodes.clear();
View Full Code Here
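
This is the per-iteration reset in the HDFS Balancer: the previous iteration's NetworkTopology and the utilization buckets are discarded so the next iteration can start from a fresh cluster report. The sketch below is a hedged illustration of the rebuild that follows; NodeBase stands in for the DatanodeInfo objects (which also implement Node) that the real Balancer adds.

import java.util.Arrays;
import java.util.List;

import org.apache.hadoop.net.NetworkTopology;
import org.apache.hadoop.net.Node;
import org.apache.hadoop.net.NodeBase;

public class BalancerTopologySketch {
  public static void main(String[] args) {
    // Each iteration starts with an empty topology, as resetData() ensures.
    NetworkTopology cluster = new NetworkTopology();

    // Stand-in for the datanode report fetched at the start of an iteration.
    List<Node> report = Arrays.<Node>asList(
        new NodeBase("/rack1/dn1"),
        new NodeBase("/rack1/dn2"),
        new NodeBase("/rack2/dn3"));
    for (Node dn : report) {
      cluster.add(dn);
    }
    System.out.println("racks=" + cluster.getNumOfRacks()
        + " datanodes=" + cluster.getNumOfLeaves());
  }
}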

      if (reached >= 0) {
        NameNode.stateChangeLog.info("STATE* Safe mode is OFF");
      }
      reached = -1;
      safeMode = null;
      final NetworkTopology nt = blockManager.getDatanodeManager().getNetworkTopology();
      NameNode.stateChangeLog.info("STATE* Network topology has "
          + nt.getNumOfRacks() + " racks and "
          + nt.getNumOfLeaves() + " datanodes");
      NameNode.stateChangeLog.info("STATE* UnderReplicatedBlocks has "
          + blockManager.numOfUnderReplicatedBlocks() + " blocks");

      startSecretManagerIfNecessary();
    }
View Full Code Here
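
When the NameNode leaves safe mode it logs a topology summary taken straight from NetworkTopology: getNumOfRacks() and getNumOfLeaves(). Both counts track membership as nodes come and go; a short standalone sketch with invented names:

import org.apache.hadoop.net.NetworkTopology;
import org.apache.hadoop.net.Node;
import org.apache.hadoop.net.NodeBase;

public class TopologyCountsSketch {
  public static void main(String[] args) {
    NetworkTopology nt = new NetworkTopology();
    Node dn1 = new NodeBase("/rack1/dn1");
    nt.add(dn1);
    nt.add(new NodeBase("/rack2/dn2"));
    System.out.println(nt.getNumOfRacks() + " racks, "
        + nt.getNumOfLeaves() + " datanodes");  // 2 racks, 2 datanodes

    // Removing a node (e.g. on decommission) updates both counts.
    nt.remove(dn1);
    System.out.println(nt.getNumOfRacks() + " racks, "
        + nt.getNumOfLeaves() + " datanodes");  // 1 rack, 1 datanode
  }
}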

    long minSize = Math.max(job.getLong(org.apache.hadoop.mapreduce.lib.input.
      FileInputFormat.SPLIT_MINSIZE, 1), minSplitSize);

    // generate splits
    ArrayList<FileSplit> splits = new ArrayList<FileSplit>(numSplits);
    NetworkTopology clusterMap = new NetworkTopology();
    for (FileStatus file: files) {
      Path path = file.getPath();
      FileSystem fs = path.getFileSystem(job);
      long length = file.getLen();
      BlockLocation[] blkLocations = fs.getFileBlockLocations(file, 0, length);
View Full Code Here
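
Here the old-API FileInputFormat.getSplits() creates a single NetworkTopology (clusterMap) and reuses it while computing split locations for every file, feeding it the host information carried by each file's BlockLocations. The following standalone sketch only illustrates the shape of that data; the host names and the single default rack are invented.

import org.apache.hadoop.fs.BlockLocation;
import org.apache.hadoop.net.NetworkTopology;
import org.apache.hadoop.net.NodeBase;

public class SplitLocalitySketch {
  public static void main(String[] args) throws Exception {
    // One 128 MB block replicated on three (invented) hosts.
    BlockLocation blk = new BlockLocation(
        new String[] {"host1:50010", "host2:50010", "host3:50010"},
        new String[] {"host1", "host2", "host3"},
        0, 128L * 1024 * 1024);

    // getSplits() keeps a clusterMap like this around and fills it with the
    // hosts it sees; here every host is registered under the default rack.
    NetworkTopology clusterMap = new NetworkTopology();
    for (String host : blk.getHosts()) {
      clusterMap.add(new NodeBase(NetworkTopology.DEFAULT_RACK + "/" + host));
    }
    System.out.println("racks=" + clusterMap.getNumOfRacks()
        + " hosts=" + clusterMap.getNumOfLeaves());  // racks=1 hosts=3
  }
}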

    int numBlocks = 3;
    int block1Size = 100, block2Size = 150, block3Size = 75;
    int fileSize = block1Size + block2Size + block3Size;
    int replicationFactor = 3;
    NetworkTopology clusterMap = new NetworkTopology();
   
    BlockLocation[] bs = new BlockLocation[numBlocks];
   
    String [] block1Hosts = {"host1","host2","host3"};
    String [] block1Names = {"host1:100","host2:100","host3:100"};
View Full Code Here
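
This test builds parallel name/host arrays per block plus a fresh NetworkTopology as the cluster map. In the full test the arrays are wrapped into BlockLocation objects with cumulative offsets; the sketch below shows that step, with the block 2 and block 3 arrays invented by analogy with block 1.

import org.apache.hadoop.fs.BlockLocation;

public class BlockLocationSketch {
  public static void main(String[] args) {
    int block1Size = 100, block2Size = 150, block3Size = 75;

    String[] block1Hosts = {"host1", "host2", "host3"};
    String[] block1Names = {"host1:100", "host2:100", "host3:100"};
    // The block 2 / block 3 arrays below are assumed, by analogy with block 1.
    String[] block2Hosts = {"host4", "host5", "host6"};
    String[] block2Names = {"host4:100", "host5:100", "host6:100"};
    String[] block3Hosts = {"host1", "host7", "host8"};
    String[] block3Names = {"host1:100", "host7:100", "host8:100"};

    // BlockLocation(names, hosts, offset, length); offsets are cumulative.
    BlockLocation[] bs = new BlockLocation[3];
    bs[0] = new BlockLocation(block1Names, block1Hosts, 0, block1Size);
    bs[1] = new BlockLocation(block2Names, block2Hosts, block1Size, block2Size);
    bs[2] = new BlockLocation(block3Names, block3Hosts,
        block1Size + block2Size, block3Size);

    System.out.println("blocks=" + bs.length
        + " fileSize=" + (block1Size + block2Size + block3Size));
  }
}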
