
Examples of org.apache.hadoop.hdfs.protocol.BlockListAsLongs
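BlockListAsLongs packs a block report into a single long[] so that a DataNode can ship it to the NameNode over the DatanodeProtocol blockReport RPC and the NameNode can decode it again. Below is a minimal round-trip sketch built only from the constructors and accessors that appear in the excerpts on this page; the wrapper class name is made up for illustration, and the import paths for ReplicaState and BlockReportIterator may differ between Hadoop versions.

    import java.util.ArrayList;
    import java.util.List;

    import org.apache.hadoop.hdfs.protocol.Block;
    import org.apache.hadoop.hdfs.protocol.BlockListAsLongs;
    import org.apache.hadoop.hdfs.protocol.BlockListAsLongs.BlockReportIterator;
    import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.ReplicaState;

    public class BlockListAsLongsRoundTrip {
      public static void main(String[] args) {
        // "DataNode" side: encode a list of finalized blocks (no under-construction
        // replicas, hence the null second argument) into the long[] wire format.
        List<Block> finalized = new ArrayList<Block>();
        finalized.add(new Block(1L, 1024L, 1000L)); // blockId, numBytes, generationStamp
        finalized.add(new Block(2L, 2048L, 1000L));
        long[] wire = new BlockListAsLongs(finalized, null).getBlockListAsLongs();

        // "NameNode" side: rebuild the report from the raw long[] and walk it.
        BlockListAsLongs report = new BlockListAsLongs(wire);
        System.out.println("blocks in report: " + report.getNumberOfBlocks());

        BlockReportIterator itBR = report.getBlockReportIterator();
        while (itBR.hasNext()) {
          Block iblk = itBR.next();
          ReplicaState iState = itBR.getCurrentReplicaState();
          System.out.println(iblk + " -> " + iState);
        }
      }
    }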


    // DataNode side: once the block report interval has elapsed, build the
    // block report from local replicas and send it to the NameNode.
    long startTime = now();
    if (startTime - lastBlockReport > dnConf.blockReportInterval) {

      // Create block report
      long brCreateStartTime = now();
      BlockListAsLongs bReport = dn.data.getBlockReport(getBlockPoolId());

      // Send block report
      long brSendStartTime = now();
      cmd = bpNamenode.blockReport(bpRegistration, getBlockPoolId(), bReport
          .getBlockListAsLongs());

      // Log the block report processing stats from Datanode perspective
      long brSendCost = now() - brSendStartTime;
      long brCreateCost = brSendStartTime - brCreateStartTime;
      dn.metrics.addBlockReport(brSendCost);
      LOG.info("BlockReport of " + bReport.getNumberOfBlocks()
          + " blocks took " + brCreateCost + " msec to generate and "
          + brSendCost + " msecs for RPC and NN processing");

      // If we have sent the first block report, then wait a random
      // time before we start the periodic block reports.


    // Pad the unused slots in the block list with dummy (non-existent) blocks,
    // then encode the whole list as a long[] block report.
    void formBlockReport() {
      // fill remaining slots with blocks that do not exist
      for(int idx = blocks.size()-1; idx >= nrBlocks; idx--)
        blocks.set(idx, new Block(blocks.size() - idx, 0, 0));
      blockReportList = new BlockListAsLongs(blocks, null).getBlockListAsLongs();
    }

    // place a delimiter in the list which separates blocks
    // that have been reported from those that have not
    BlockInfo delimiter = new BlockInfo(new Block(), 1);
    boolean added = dn.addBlock(delimiter);
    assert added : "Delimiting block cannot be present in the node";
    if(newReport == null)
      newReport = new BlockListAsLongs();
    // scan the report and process newly reported blocks
    BlockReportIterator itBR = newReport.getBlockReportIterator();
    while(itBR.hasNext()) {
      Block iblk = itBR.next();
      ReplicaState iState = itBR.getCurrentReplicaState();

  // NameNode RPC handler: rebuild the BlockListAsLongs from the raw long[]
  // sent by the DataNode and hand it to the BlockManager for processing.
  @Override // DatanodeProtocol
  public DatanodeCommand blockReport(DatanodeRegistration nodeReg,
      String poolId, long[] blocks) throws IOException {
    verifyRequest(nodeReg);
    BlockListAsLongs blist = new BlockListAsLongs(blocks);
    if(blockStateChangeLog.isDebugEnabled()) {
      blockStateChangeLog.debug("*BLOCK* NameNode.blockReport: "
           + "from " + nodeReg.getName() + " " + blist.getNumberOfBlocks()
           + " blocks");
    }

    namesystem.getBlockManager().processReport(nodeReg, poolId, blist);
    if (nn.getFSImage().isUpgradeFinalized())

  // DataNode storage layer: snapshot the replica map for one block pool and
  // split the replicas into finalized and under-construction lists.
  public BlockListAsLongs getBlockReport(String bpid) {
    int size = volumeMap.size(bpid);
    ArrayList<ReplicaInfo> finalized = new ArrayList<ReplicaInfo>(size);
    ArrayList<ReplicaInfo> uc = new ArrayList<ReplicaInfo>();
    if (size == 0) {
      return new BlockListAsLongs(finalized, uc);
    }
   
    synchronized(this) {
      for (ReplicaInfo b : volumeMap.replicas(bpid)) {
        switch(b.getState()) {
        case FINALIZED:
          finalized.add(b);
          break;
        case RBW:
        case RWR:
          uc.add(b);
          break;
        case RUR:
          ReplicaUnderRecovery rur = (ReplicaUnderRecovery)b;
          uc.add(rur.getOriginalReplica());
          break;
        case TEMPORARY:
          // temporary replicas are not included in the block report
          break;
        default:
          assert false : "Illegal ReplicaInfo state.";
        }
      }
      return new BlockListAsLongs(finalized, uc);
    }
  }

    // all blocks belong to the same file, hence same BP
    DataNode dn = cluster.getDataNodes().get(DN_N0);
    String poolId = cluster.getNamesystem().getBlockPoolId();
    DatanodeRegistration dnR = dn.getDNRegistrationForBP(poolId);
    cluster.getNameNodeRpc().blockReport(dnR, poolId,
      new BlockListAsLongs(blocks, null).getBlockListAsLongs());

    List<LocatedBlock> blocksAfterReport =
      DFSTestUtil.getAllBlocks(fs.open(filePath));

    if(LOG.isDebugEnabled()) {

    // all blocks belong to the same file, hence same BP
    DataNode dn = cluster.getDataNodes().get(DN_N0);
    String poolId = cluster.getNamesystem().getBlockPoolId();
    DatanodeRegistration dnR = dn.getDNRegistrationForBP(poolId);
    cluster.getNameNodeRpc().blockReport(dnR, poolId,
      new BlockListAsLongs(blocks, null).getBlockListAsLongs());

    BlockManagerTestUtil.getComputedDatanodeWork(cluster.getNamesystem()
        .getBlockManager());

    printStats();

    DataNode dn = cluster.getDataNodes().get(DN_N0);
    String poolId = cluster.getNamesystem().getBlockPoolId();
    DatanodeRegistration dnR = dn.getDNRegistrationForBP(poolId);
    DatanodeCommand dnCmd =
      cluster.getNameNodeRpc().blockReport(dnR, poolId,
        new BlockListAsLongs(blocks, null).getBlockListAsLongs());
    if(LOG.isDebugEnabled()) {
      LOG.debug("Got the command: " + dnCmd);
    }
    printStats();

    // all blocks belong to the same file, hence same BP
    DataNode dn = cluster.getDataNodes().get(DN_N1);
    String poolId = cluster.getNamesystem().getBlockPoolId();
    DatanodeRegistration dnR = dn.getDNRegistrationForBP(poolId);
    cluster.getNameNodeRpc().blockReport(dnR, poolId,
        new BlockListAsLongs(blocks, null).getBlockListAsLongs());
    printStats();
    assertEquals("Wrong number of Under-Replicated Blocks",
      0, cluster.getNamesystem().getUnderReplicatedBlocks());
  }

    // all blocks belong to the same file, hence same BP
    DataNode dn = cluster.getDataNodes().get(DN_N1);
    String poolId = cluster.getNamesystem().getBlockPoolId();
    DatanodeRegistration dnR = dn.getDNRegistrationForBP(poolId);
    cluster.getNameNodeRpc().blockReport(dnR, poolId,
        new BlockListAsLongs(blocks, null).getBlockListAsLongs());
    printStats();
    assertEquals("Wrong number of Corrupted blocks",
      1, cluster.getNamesystem().getCorruptReplicaBlocks() +
// the following might have to be added into the equation if
// the same block could be in two different states at the same time,
// and then the expected number of blocks would have to be changed to '2'
//        cluster.getNamesystem().getPendingReplicationBlocks() +
        cluster.getNamesystem().getPendingDeletionBlocks());

    // Pick another block and corrupt its length so it is less than the original
    if (randIndex == 0)
      randIndex++;
    else
      randIndex--;
    corruptedBlock = blocks.get(randIndex);
    corruptBlockLen(corruptedBlock);
    if(LOG.isDebugEnabled()) {
      LOG.debug("Done corrupting length of " + corruptedBlock.getBlockName());
    }
   
    cluster.getNameNodeRpc().blockReport(dnR, poolId,
        new BlockListAsLongs(blocks, null).getBlockListAsLongs());
    printStats();

    assertEquals("Wrong number of Corrupted blocks",
      2, cluster.getNamesystem().getCorruptReplicaBlocks() +
        cluster.getNamesystem().getPendingReplicationBlocks() +


