Package: org.apache.hadoop.net

Usage examples of the org.apache.hadoop.net.NetworkTopology class


      if (reached >= 0) {
        NameNode.stateChangeLog.info("STATE* Safe mode is OFF.");
      }
      reached = -1;
      safeMode = null;
      final NetworkTopology nt = blockManager.getDatanodeManager().getNetworkTopology();
      NameNode.stateChangeLog.info("STATE* Network topology has "
          + nt.getNumOfRacks() + " racks and "
          + nt.getNumOfLeaves() + " datanodes");
      NameNode.stateChangeLog.info("STATE* UnderReplicatedBlocks has "
          + blockManager.numOfUnderReplicatedBlocks() + " blocks");
    }
View Full Code Here


    return makeSplit(path, start, size,
                     getSplitHosts(blkLocations, start, size, clusterMap));
  }

  public List<InputSplit> getSplits(JobConf job, FileStatus file, String pattern, long splitSize) throws IOException {
    NetworkTopology clusterMap = new NetworkTopology();
    List<InputSplit> splits = new ArrayList<InputSplit>();
    Path path = file.getPath();
    long length = file.getLen();
    FileSystem fs = file.getPath().getFileSystem(job);
    BlockLocation[] blkLocations = fs.getFileBlockLocations(file, 0, length);
View Full Code Here

    long minSize = Math.max(job.getLong(org.apache.hadoop.mapreduce.lib.input.
      FileInputFormat.SPLIT_MINSIZE, 1), minSplitSize);

    // generate splits
    ArrayList<FileSplit> splits = new ArrayList<FileSplit>(numSplits);
    NetworkTopology clusterMap = new NetworkTopology();
    for (FileStatus file: files) {
      Path path = file.getPath();
      long length = file.getLen();
      if (length != 0) {
        FileSystem fs = path.getFileSystem(job);
View Full Code Here

      if (reached >= 0) {
        NameNode.stateChangeLog.info("STATE* Safe mode is OFF");
      }
      reached = -1;
      safeMode = null;
      final NetworkTopology nt = blockManager.getDatanodeManager().getNetworkTopology();
      NameNode.stateChangeLog.info("STATE* Network topology has "
          + nt.getNumOfRacks() + " racks and "
          + nt.getNumOfLeaves() + " datanodes");
      NameNode.stateChangeLog.info("STATE* UnderReplicatedBlocks has "
          + blockManager.numOfUnderReplicatedBlocks() + " blocks");

      startSecretManagerIfNecessary();
    }
View Full Code Here

    return goodBlock;
  }
 
  /* reset all fields in a balancer preparing for the next iteration */
  private void resetData() {
    this.cluster = new NetworkTopology();
    this.overUtilizedDatanodes.clear();
    this.aboveAvgUtilizedDatanodes.clear();
    this.belowAvgUtilizedDatanodes.clear();
    this.underUtilizedDatanodes.clear();
    this.datanodes.clear();
View Full Code Here

    rackA = nodes.subList(0, 3);
    rackB = nodes.subList(3, 6);
  }

  private void addNodes(Iterable<DatanodeDescriptor> nodesToAdd) {
    NetworkTopology cluster = bm.getDatanodeManager().getNetworkTopology();
    // construct network topology
    for (DatanodeDescriptor dn : nodesToAdd) {
      cluster.add(dn);
      dn.getStorageInfos()[0].setUtilizationForTesting(
          2 * HdfsConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 0L,
          2 * HdfsConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 0L);
      dn.updateHeartbeat(
          BlockManagerTestUtil.getStorageReportsForDatanode(dn), 0L, 0L, 0, 0);
View Full Code Here

      bm.getDatanodeManager().checkIfClusterIsNowMultiRack(dn);
    }
  }

  private void removeNode(DatanodeDescriptor deadNode) {
    NetworkTopology cluster = bm.getDatanodeManager().getNetworkTopology();
    cluster.remove(deadNode);
    bm.removeBlocksAssociatedTo(deadNode);
  }
View Full Code Here

      long fileLen = blockSize * NUM_BLOCKS;
      DFSTestUtil.createFile(dfs, path, fileLen, REPL_FACTOR, 1);
     
      // Create an under-replicated file
      NameNode namenode = cluster.getNameNode();
      NetworkTopology nettop = cluster.getNamesystem().getBlockManager()
          .getDatanodeManager().getNetworkTopology();
      Map<String,String[]> pmap = new HashMap<String, String[]>();
      Writer result = new StringWriter();
      PrintWriter out = new PrintWriter(result, true);
      InetAddress remoteAddress = InetAddress.getLocalHost();
View Full Code Here

      long fileLen = blockSize * NUM_BLOCKS;
      DFSTestUtil.createFile(dfs, path, fileLen, REPL_FACTOR, 1);
     
      // Create an under-replicated file
      NameNode namenode = cluster.getNameNode();
      NetworkTopology nettop = cluster.getNamesystem().getBlockManager()
          .getDatanodeManager().getNetworkTopology();
      // Add a new node on different rack, so previous blocks' replicas
      // are considered to be misplaced
      nettop.add(DFSTestUtil.getDatanodeDescriptor("/rack2", "/host3"));
      NUM_DN++;
     
      Map<String,String[]> pmap = new HashMap<String, String[]>();
      Writer result = new StringWriter();
      PrintWriter out = new PrintWriter(result, true);
View Full Code Here

    // Number of replicas to actually start
    final short NUM_REPLICAS = 1;

    Configuration conf = new Configuration();
    NameNode namenode = mock(NameNode.class);
    NetworkTopology nettop = mock(NetworkTopology.class);
    Map<String,String[]> pmap = new HashMap<String, String[]>();
    Writer result = new StringWriter();
    PrintWriter out = new PrintWriter(result, true);
    InetAddress remoteAddress = InetAddress.getLocalHost();
    FSNamesystem fsName = mock(FSNamesystem.class);
View Full Code Here

TOP

Related Classes of org.apache.hadoop.net.NetworkTopology

Copyright © 2018 www.massapi.com. All rights reserved.
All source code is the property of its respective owners. Java is a trademark of Sun Microsystems, Inc. and is owned by Oracle Inc. Contact: coftware@gmail.com.