Examples of NumberReplicas
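
NumberReplicas is a small counter holder returned by countNodes(Block) on FSNamesystem (or on its BlockManager in later layouts). It reports how many replicas of a block are live, decommissioned, corrupt, or excess, and the snippets below show how the NameNode consumes those counts. As a quick orientation, here is a minimal sketch of a caller; it is hypothetical rather than taken from the examples, and assumes a namesystem and block already in scope:

  // Count the replicas of a block as the NameNode currently sees them.
  NumberReplicas num = namesystem.countNodes(block);
  // A block is considered usable if it has at least one live or
  // decommissioned replica (mirroring the metasave example below).
  int usable = num.liveReplicas() + num.decommissionedReplicas();
  System.out.println(block + ((usable > 0) ? "" : " MISSING")
      + " live=" + num.liveReplicas()
      + " decommissioned=" + num.decommissionedReplicas()
      + " corrupt=" + num.corruptReplicas()
      + " excess=" + num.excessReplicas());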


Examples of org.apache.hadoop.hdfs.server.namenode.FSNamesystem.NumberReplicas
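
In this FSNamesystem excerpt, updateNeededReplications() recounts a block's replicas after a change and either updates its entry in the neededReplications queue or removes it, reconstructing the old counts from the supplied deltas.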

  /* Updates a block in the under-replication queue. */
  void updateNeededReplications(Block block, int curReplicasDelta,
      int expectedReplicasDelta) {
    synchronized (namesystem) {
      NumberReplicas repl = countNodes(block);
      int curExpectedReplicas = getReplication(block);
      if (isNeededReplication(block, curExpectedReplicas, repl.liveReplicas())) {
        neededReplications.update(block, repl.liveReplicas(), repl
            .decommissionedReplicas(), curExpectedReplicas, curReplicasDelta,
            expectedReplicasDelta);
      } else {
        int oldReplicas = repl.liveReplicas()-curReplicasDelta;
        int oldExpectedReplicas = curExpectedReplicas-expectedReplicasDelta;
        neededReplications.remove(block, oldReplicas, repl.decommissionedReplicas(),
                                  oldExpectedReplicas);
      }
    }
  }

Examples of org.apache.hadoop.hdfs.server.namenode.FSNamesystem.NumberReplicas
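
Here checkReplication() counts a block's nodes, filtering out decommissioned ones, and queues the block in neededReplications if its live replica count falls short of the expected replication factor.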

  void checkReplication(Block block, int numExpectedReplicas) {
    // filter out containingNodes that are marked for decommission.
    NumberReplicas number = countNodes(block);
    if (isNeededReplication(block, numExpectedReplicas, number.liveReplicas())) {
      neededReplications.add(block,
                             number.liveReplicas(),
                             number.decommissionedReplicas,
                             numExpectedReplicas);
    }
  }

Examples of org.apache.hadoop.hdfs.server.namenode.FSNamesystem.NumberReplicas
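
This test excerpt starts a datanode on a second rack and then polls the BlockManager until the block spans two racks, reaches the replication factor, and the neededReplications queue drains.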

      DFSTestUtil.waitReplication(fs, FILE_PATH, REPLICATION_FACTOR);
     
      Block b = DFSTestUtil.getFirstBlock(fs, FILE_PATH);
      final FSNamesystem namesystem = cluster.getNamesystem();
      int numRacks = namesystem.blockManager.getNumberOfRacks(b);
      NumberReplicas number = namesystem.blockManager.countNodes(b);
      int curReplicas = number.liveReplicas();
      int neededReplicationSize =
                           namesystem.blockManager.neededReplications.size();
     
      //Add a new datanode on a different rack
      String[] newRacks = {"/rack2"};
      cluster.startDataNodes(conf, 1, true, null, newRacks);

      while ( (numRacks < 2) || (curReplicas < REPLICATION_FACTOR) ||
              (neededReplicationSize > 0) ) {
        LOG.info("Waiting for replication");
        Thread.sleep(600);
        numRacks = namesystem.blockManager.getNumberOfRacks(b);
        number = namesystem.blockManager.countNodes(b);
        curReplicas = number.liveReplicas();
        neededReplicationSize =
                           namesystem.blockManager.neededReplications.size();
      }

      LOG.info("curReplicas = " + curReplicas);

Examples of org.apache.hadoop.hdfs.server.namenode.FSNamesystem.NumberReplicas
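
A variant of the previous test: three datanodes are added on a second rack and the replication factor is raised to 5 before polling for the same convergence conditions.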

      DFSTestUtil.waitReplication(fs, FILE_PATH, REPLICATION_FACTOR);
     
      Block b = DFSTestUtil.getFirstBlock(fs, FILE_PATH);
      final FSNamesystem namesystem = cluster.getNamesystem();
      int numRacks = namesystem.blockManager.getNumberOfRacks(b);
      NumberReplicas number = namesystem.blockManager.countNodes(b);
      int curReplicas = number.liveReplicas();
      int neededReplicationSize =
                           namesystem.blockManager.neededReplications.size();
     
      //Add a new datanode on a different rack
      String[] newRacks = {"/rack2", "/rack2", "/rack2"};
      cluster.startDataNodes(conf, 3, true, null, newRacks);
      REPLICATION_FACTOR = 5;
      namesystem.setReplication(FILE_NAME, REPLICATION_FACTOR);

      while ( (numRacks < 2) || (curReplicas < REPLICATION_FACTOR) ||
              (neededReplicationSize > 0) ) {
        LOG.info("Waiting for replication");
        Thread.sleep(600);
        numRacks = namesystem.blockManager.getNumberOfRacks(b);
        number = namesystem.blockManager.countNodes(b);
        curReplicas = number.liveReplicas();
        neededReplicationSize =
                           namesystem.blockManager.neededReplications.size();
      }

      LOG.info("curReplicas = " + curReplicas);

Examples of org.apache.hadoop.hdfs.server.namenode.FSNamesystem.NumberReplicas
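
In this test excerpt, countNodes() is polled under the namesystem lock to detect an excess replica after a datanode restart; a non-excess datanode is then stopped and marked dead so the test can wait for re-replication and, after another restart, for two excess replicas.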

      // restart the first datanode
      cluster.restartDataNode(dnprop);
      cluster.waitActive();
     
      // check if excessive replica is detected
      NumberReplicas num = null;
      do {
        synchronized (namesystem) {
          num = namesystem.countNodes(block);
        }
      } while (num.excessReplicas() == 0);
     
      // find out a non-excess node
      Iterator<DatanodeDescriptor> iter = namesystem.blocksMap.nodeIterator(block);
      DatanodeDescriptor nonExcessDN = null;
      while (iter.hasNext()) {
        DatanodeDescriptor dn = iter.next();
        Collection<Block> blocks = namesystem.excessReplicateMap.get(dn.getStorageID());
        if (blocks == null || !blocks.contains(block)) {
          nonExcessDN = dn;
          break;
        }
      }
      assertTrue(nonExcessDN != null);
     
      // bring down non excessive datanode
      dnprop = cluster.stopDataNode(nonExcessDN.getName());
      // make sure that NN detects that the datanode is down
      synchronized (namesystem.heartbeats) {
        nonExcessDN.setLastUpdate(0); // mark it dead
        namesystem.heartbeatCheck();
      }
     
      // The block should be replicated
      do {
        num = namesystem.countNodes(block);
      } while (num.liveReplicas() != REPLICATION_FACTOR);
     
      // restart the first datanode
      cluster.restartDataNode(dnprop);
      cluster.waitActive();
     
      // check if excessive replica is detected
      do {
        num = namesystem.countNodes(block);
      } while (num.excessReplicas() != 2);
    } finally {
      cluster.shutdown();
    }
  }

Examples of org.apache.hadoop.hdfs.server.namenode.FSNamesystem.NumberReplicas
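
The same test against the BlockManager-based layout, taking the namesystem write lock around each countNodes() call instead of synchronizing on the namesystem.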

      // restart the first datanode
      cluster.restartDataNode(dnprop);
      cluster.waitActive();
     
      // check if excessive replica is detected
      NumberReplicas num = null;
      do {
        synchronized (namesystem) {
          num = namesystem.blockManager.countNodes(block);
        }
      } while (num.excessReplicas() == 0);
     
      // find out a non-excess node
      Iterator<DatanodeDescriptor> iter = namesystem.blockManager.blocksMap.nodeIterator(block);
      DatanodeDescriptor nonExcessDN = null;
      while (iter.hasNext()) {
        DatanodeDescriptor dn = iter.next();
        Collection<Block> blocks = namesystem.blockManager.excessReplicateMap.get(dn.getStorageID());
        if (blocks == null || !blocks.contains(block) ) {
          nonExcessDN = dn;
          break;
        }
      }
      assertTrue(nonExcessDN != null);
     
      // bring down non excessive datanode
      dnprop = cluster.stopDataNode(nonExcessDN.getName());
      // make sure that NN detects that the datanode is down
      synchronized (namesystem.heartbeats) {
        nonExcessDN.setLastUpdate(0); // mark it dead
        namesystem.heartbeatCheck();
      }
     
      // The block should be replicated
      do {
        namesystem.writeLock();
        num = namesystem.blockManager.countNodes(block);
        namesystem.writeUnlock();
      } while (num.liveReplicas() != REPLICATION_FACTOR);
     
      // restart the first datanode
      cluster.restartDataNode(dnprop);
      cluster.waitActive();
     
      // check if excessive replica is detected
      do {
        namesystem.writeLock();
        num = namesystem.blockManager.countNodes(block);
        namesystem.writeUnlock();
      } while (num.excessReplicas() != 2);
    } finally {
      cluster.shutdown();
    }
  }

Examples of org.apache.hadoop.hdfs.server.namenode.FSNamesystem.NumberReplicas
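
This metasave excerpt fills a fresh NumberReplicas via chooseSourceDatanode() and prints the live, decommissioned, corrupt, and excess counts for every block waiting for replication, flagging blocks with no usable replicas as MISSING.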

      out.println("Metasave: Blocks waiting for replication: " +
                  neededReplications.size());
      for (Block block : neededReplications) {
        List<DatanodeDescriptor> containingNodes =
                                          new ArrayList<DatanodeDescriptor>();
        NumberReplicas numReplicas = new NumberReplicas();
        // source node returned is not used
        chooseSourceDatanode(block, containingNodes, numReplicas);
        int usableReplicas = numReplicas.liveReplicas() +
                             numReplicas.decommissionedReplicas();
      
        if (block instanceof BlockInfo) {
          String fileName = ((BlockInfo)block).getINode().getFullPathName();
          out.print(fileName + ": ");
        }
        // l: live, d: decommissioned, c: corrupt, e: excess
        out.print(block + ((usableReplicas > 0)? "" : " MISSING") +
                  " (replicas:" +
                  " l: " + numReplicas.liveReplicas() +
                  " d: " + numReplicas.decommissionedReplicas() +
                  " c: " + numReplicas.corruptReplicas() +
                  " e: " + numReplicas.excessReplicas() + ") ");

        Collection<DatanodeDescriptor> corruptNodes =
                                      corruptReplicas.getNodes(block);
       
        for (Iterator<DatanodeDescriptor> jt = blocksMap.nodeIterator(block);

Examples of org.apache.hadoop.hdfs.server.namenode.FSNamesystem.NumberReplicas
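
This excerpt from the replication scheduler uses NumberReplicas to size the replication request, then recounts the block after choosing targets outside the global lock, since its state may have changed in the meantime.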

        requiredReplication = fileINode.getReplication();

        // get a source data-node
        containingNodes = new ArrayList<DatanodeDescriptor>();
        NumberReplicas numReplicas = new NumberReplicas();
        srcNode = chooseSourceDatanode(block, containingNodes, numReplicas);
        if ((numReplicas.liveReplicas() + numReplicas.decommissionedReplicas())
            <= 0) {
          missingBlocksInCurIter++;
        }
        if (srcNode == null) // block cannot be replicated from any node
          return false;

        // do not schedule more if enough replicas are already pending
        numEffectiveReplicas = numReplicas.liveReplicas() +
                                pendingReplications.getNumReplicas(block);
     
        if (numEffectiveReplicas >= requiredReplication) {
          if ( (pendingReplications.getNumReplicas(block) > 0) ||
               (blockHasEnoughRacks(block)) ) {
            neededReplications.remove(block, priority); // remove from neededReplications
            replIndex--;
            NameNode.stateChangeLog.info("BLOCK* "
                + "Removing block " + block
                + " from neededReplications as it has enough replicas.");
            return false;
          }
        }

        if (numReplicas.liveReplicas() < requiredReplication) {
          additionalReplRequired = requiredReplication - numEffectiveReplicas;
        } else {
          additionalReplRequired = 1; // needed on a new rack
        }

      }
    } finally {
      namesystem.writeUnlock();
    }

    // choose replication targets: NOT HOLDING THE GLOBAL LOCK
    // It is costly to extract the filename for which chooseTargets is called,
    // so for now we pass in the Inode itself.
    DatanodeDescriptor targets[] =
                       replicator.chooseTarget(fileINode, additionalReplRequired,
                       srcNode, containingNodes, block.getNumBytes());
    if (targets.length == 0)
      return false;

    namesystem.writeLock();
    try {
      synchronized (neededReplications) {
        // Recheck since global lock was released
        // block should belong to a file
        fileINode = blocksMap.getINode(block);
        // abandoned block or block reopened for append
        if (fileINode == null || fileINode.isUnderConstruction()) {
          neededReplications.remove(block, priority); // remove from neededReplications
          replIndex--;
          return false;
        }
        requiredReplication = fileINode.getReplication();

        // do not schedule more if enough replicas are already pending
        NumberReplicas numReplicas = countNodes(block);
        numEffectiveReplicas = numReplicas.liveReplicas() +
        pendingReplications.getNumReplicas(block);

        if (numEffectiveReplicas >= requiredReplication) {
          if ( (pendingReplications.getNumReplicas(block) > 0) ||
               (blockHasEnoughRacks(block)) ) {
            neededReplications.remove(block, priority); // remove from neededReplications
            replIndex--;
            NameNode.stateChangeLog.info("BLOCK* "
                + "Removing block " + block
                + " from neededReplications as it has enough replicas.");
            return false;
          }
        }

        if ( (numReplicas.liveReplicas() >= requiredReplication) &&
             (!blockHasEnoughRacks(block)) ) {
          if (srcNode.getNetworkLocation().equals(targets[0].getNetworkLocation())) {
            // no point continuing unless the target is on a new rack
            return false;
          }

Examples of org.apache.hadoop.hdfs.server.namenode.FSNamesystem.NumberReplicas
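
When pending replications time out, each timed-out block is recounted and put back into neededReplications if it still needs more replicas.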

    Block[] timedOutItems = pendingReplications.getTimedOutBlocks();
    if (timedOutItems != null) {
      namesystem.writeLock();
      try {
        for (int i = 0; i < timedOutItems.length; i++) {
          NumberReplicas num = countNodes(timedOutItems[i]);
          if (isNeededReplication(timedOutItems[i], getReplication(timedOutItems[i]),
                                 num.liveReplicas())) {
            neededReplications.add(timedOutItems[i],
                                   num.liveReplicas(),
                                   num.decommissionedReplicas(),
                                   getReplication(timedOutItems[i]));
          }
        }
      } finally {
        namesystem.writeUnlock();

Examples of org.apache.hadoop.hdfs.server.namenode.FSNamesystem.NumberReplicas
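
In addStoredBlock(), the replica count drives block completion, safe-mode accounting, and under-/over-replication handling, and the corrupt count is cross-checked against the corruptReplicas map.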

          + "Redundant addStoredBlock request received for " + storedBlock
          + " on " + node.getName() + " size " + storedBlock.getNumBytes());
    }

    // filter out containingNodes that are marked for decommission.
    NumberReplicas num = countNodes(storedBlock);
    int numLiveReplicas = num.liveReplicas();
    int numCurrentReplica = numLiveReplicas
      + pendingReplications.getNumReplicas(storedBlock);

    if(storedBlock.getBlockUCState() == BlockUCState.COMMITTED &&
        numLiveReplicas >= minReplication)
      storedBlock = completeBlock(fileINode, storedBlock);

    // check whether safe replication is reached for the block
    // only complete blocks are counted towards that
    if(storedBlock.isComplete())
      namesystem.incrementSafeBlockCount(numCurrentReplica);

    // if file is under construction, then check whether the block
    // can be completed
    if (fileINode.isUnderConstruction()) {
      return storedBlock;
    }

    // do not handle mis-replicated blocks during startup
    if (namesystem.isInSafeMode())
      return storedBlock;

    // handle underReplication/overReplication
    short fileReplication = fileINode.getReplication();
    if (!isNeededReplication(storedBlock, fileReplication, numCurrentReplica)) {
      neededReplications.remove(storedBlock, numCurrentReplica,
          num.decommissionedReplicas, fileReplication);
    } else {
      updateNeededReplications(storedBlock, curReplicaDelta, 0);
    }
    if (numCurrentReplica > fileReplication) {
      processOverReplicatedBlock(storedBlock, fileReplication, node, delNodeHint);
    }
    // If the file replication has reached desired value
    // we can remove any corrupt replicas the block may have
    int corruptReplicasCount = corruptReplicas.numCorruptReplicas(storedBlock);
    int numCorruptNodes = num.corruptReplicas();
    if (numCorruptNodes != corruptReplicasCount) {
      FSNamesystem.LOG.warn("Inconsistent number of corrupt replicas for " +
          storedBlock + ": blockMap has " + numCorruptNodes +
          " but corrupt replicas map has " + corruptReplicasCount);
    }