Examples of BlockListAsLongs


Examples of org.apache.hadoop.hdfs.protocol.BlockListAsLongs

  @Override // DatanodeProtocol
  public DatanodeCommand blockReport(DatanodeRegistration nodeReg,
      String poolId, StorageBlockReport[] reports) throws IOException {
    verifyRequest(nodeReg);
    BlockListAsLongs blist = new BlockListAsLongs(reports[0].getBlocks());
    if(blockStateChangeLog.isDebugEnabled()) {
      blockStateChangeLog.debug("*BLOCK* NameNode.blockReport: "
           + "from " + nodeReg + " " + blist.getNumberOfBlocks()
           + " blocks");
    }

    namesystem.getBlockManager().processReport(nodeReg, poolId, blist);
    if (nn.getFSImage().isUpgradeFinalized() && !nn.isStandbyState())
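
In this NameNode-side handler the raw long[] carried by the first StorageBlockReport is wrapped back into a BlockListAsLongs before being handed to the BlockManager. Below is a minimal round-trip sketch, not taken from Hadoop itself, using only the constructors and accessors that appear in these examples; the class name, block IDs, sizes, and generation stamps are made up:

  import java.util.ArrayList;
  import java.util.List;
  import org.apache.hadoop.hdfs.protocol.Block;
  import org.apache.hadoop.hdfs.protocol.BlockListAsLongs;

  public class BlockReportRoundTripSketch {
    public static void main(String[] args) {
      // Hypothetical finalized replicas; Block(blockId, numBytes, genStamp).
      List<Block> finalized = new ArrayList<Block>();
      finalized.add(new Block(1L, 1024L, 100L));
      finalized.add(new Block(2L, 2048L, 101L));

      // DataNode side: encode the replica lists into the compact long[] form.
      long[] encoded = new BlockListAsLongs(finalized, null).getBlockListAsLongs();

      // NameNode side: wrap the longs received over RPC back into an object.
      BlockListAsLongs decoded = new BlockListAsLongs(encoded);
      System.out.println("decoded " + decoded.getNumberOfBlocks() + " blocks");
    }
  }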

Examples of org.apache.hadoop.hdfs.protocol.BlockListAsLongs

        if (b.isFinalized()) {
          blocks.add(b.theBlock);
        }
      }
    }
    return new BlockListAsLongs(blocks, null);
  }

Examples of org.apache.hadoop.hdfs.protocol.BlockListAsLongs

      // a FINALIZED one.
      reportReceivedDeletedBlocks();

      // Create block report
      long brCreateStartTime = now();
      BlockListAsLongs bReport = dn.getFSDataset().getBlockReport(
          bpos.getBlockPoolId());

      // Send block report
      long brSendStartTime = now();
      StorageBlockReport[] report = { new StorageBlockReport(
          new DatanodeStorage(bpRegistration.getStorageID()),
          bReport.getBlockListAsLongs()) };
      cmd = bpNamenode.blockReport(bpRegistration, bpos.getBlockPoolId(), report);

      // Log the block report processing stats from Datanode perspective
      long brSendCost = now() - brSendStartTime;
      long brCreateCost = brSendStartTime - brCreateStartTime;
      dn.getMetrics().addBlockReport(brSendCost);
      LOG.info("BlockReport of " + bReport.getNumberOfBlocks()
          + " blocks took " + brCreateCost + " msec to generate and "
          + brSendCost + " msecs for RPC and NN processing");

      // If we have sent the first block report, then wait a random
      // time before we start the periodic block reports.
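
The DataNode side above wraps the encoded report in a one-element StorageBlockReport array keyed by the DataNode's storage and sends it with the blockReport RPC. A compact sketch of just that assembly step follows; it is not Hadoop code, the helper name and parameters are made up, and the server-side import paths are assumed:

  import java.util.List;
  import org.apache.hadoop.hdfs.protocol.Block;
  import org.apache.hadoop.hdfs.protocol.BlockListAsLongs;
  import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage;
  import org.apache.hadoop.hdfs.server.protocol.StorageBlockReport;

  class BlockReportAssemblySketch {
    // Builds the argument for DatanodeProtocol#blockReport, following the
    // pattern above: one StorageBlockReport holding the encoded long[].
    // The storage ID would normally come from the DataNode registration.
    static StorageBlockReport[] assemble(String storageID,
        List<Block> finalized, List<Block> underConstruction) {
      BlockListAsLongs bReport = new BlockListAsLongs(finalized, underConstruction);
      return new StorageBlockReport[] {
          new StorageBlockReport(new DatanodeStorage(storageID),
              bReport.getBlockListAsLongs())
      };
    }
  }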

Examples of org.apache.hadoop.hdfs.protocol.BlockListAsLongs

    // place a delimiter in the list which separates blocks
    // that have been reported from those that have not
    BlockInfo delimiter = new BlockInfo(new Block(), 1);
    boolean added = this.addBlock(delimiter);
    assert added : "Delimiting block cannot be present in the node";
    if(newReport == null)
      newReport = new BlockListAsLongs( new long[0]);
    // scan the report and collect newly reported blocks
    // Note we are taking special precaution to limit tmp blocks allocated
    // as part of this block report - which is why the block list is stored as longs
    Block iblk = new Block(); // a fixed new'ed block to be reused with index i
    for (int i = 0; i < newReport.getNumberOfBlocks(); ++i) {

Examples of org.apache.hadoop.hdfs.protocol.BlockListAsLongs

  public BlockListAsLongs getBlockReport(String bpid) {
    int size =  volumeMap.size(bpid);
    ArrayList<ReplicaInfo> finalized = new ArrayList<ReplicaInfo>(size);
    ArrayList<ReplicaInfo> uc = new ArrayList<ReplicaInfo>();
    if (size == 0) {
      return new BlockListAsLongs(finalized, uc);
    }
   
    synchronized(this) {
      for (ReplicaInfo b : volumeMap.replicas(bpid)) {
        switch(b.getState()) {
        case FINALIZED:
          finalized.add(b);
          break;
        case RBW:       // replica being written
        case RWR:       // replica waiting to be recovered
          uc.add(b);
          break;
        case RUR:       // replica under recovery: report its original replica
          ReplicaUnderRecovery rur = (ReplicaUnderRecovery)b;
          uc.add(rur.getOriginalReplica());
          break;
        case TEMPORARY: // temporary replicas are never reported
          break;
        default:
          assert false : "Illegal ReplicaInfo state.";
        }
      }
      return new BlockListAsLongs(finalized, uc);
    }
  }

Examples of org.apache.hadoop.hdfs.protocol.BlockListAsLongs

      dnRegistration = nameNodeProto.registerDatanode(dnRegistration);
      // first block reports
      storage = new DatanodeStorage(dnRegistration.getStorageID());
      final StorageBlockReport[] reports = {
          new StorageBlockReport(storage,
              new BlockListAsLongs(null, null).getBlockListAsLongs())
      };
      nameNodeProto.blockReport(dnRegistration,
          nameNode.getNamesystem().getBlockPoolId(), reports);
    }
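
The benchmark registers a simulated DataNode and immediately sends an empty first block report: new BlockListAsLongs(null, null) encodes zero finalized and zero under-construction replicas, while the DatanodeDescriptor example further up builds its empty report from new long[0] instead. A tiny sketch of the idiom used here, with the same assumed imports as the sketches above and a made-up storage ID; the final RPC call is shown only as a comment because it needs a live registration and block pool ID:

  // Sketch only: the empty first block report, as sent by the benchmark above.
  BlockListAsLongs empty = new BlockListAsLongs(null, null);
  StorageBlockReport[] reports = {
      new StorageBlockReport(new DatanodeStorage("DS-benchmark-0"), // made-up ID
          empty.getBlockListAsLongs())
  };
  // nameNodeProto.blockReport(dnRegistration, blockPoolId, reports);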

Examples of org.apache.hadoop.hdfs.protocol.BlockListAsLongs

    void formBlockReport() {
      // fill remaining slots with blocks that do not exist
      for(int idx = blocks.size()-1; idx >= nrBlocks; idx--)
        blocks.set(idx, new Block(blocks.size() - idx, 0, 0));
      blockReportList = new BlockListAsLongs(blocks,null).getBlockListAsLongs();
    }
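
formBlockReport pads the fixed-size block list with blocks that cannot exist in the cluster (length 0, generation stamp 0) so every synthetic report has the same shape, then caches the encoded long[]. The sketch below isolates that padding-and-encoding step; it is not benchmark code, the class name and sizes are made up, and the placeholder Block() entries stand in for the benchmark's real blocks:

  import java.util.ArrayList;
  import java.util.Collections;
  import java.util.List;
  import org.apache.hadoop.hdfs.protocol.Block;
  import org.apache.hadoop.hdfs.protocol.BlockListAsLongs;

  class FormBlockReportSketch {
    static long[] form(int slots, int nrBlocks) {
      // Start with "slots" placeholder blocks; in the benchmark the first
      // nrBlocks entries hold real blocks, the rest are filled below.
      List<Block> blocks = new ArrayList<Block>(Collections.nCopies(slots, new Block()));
      for (int idx = blocks.size() - 1; idx >= nrBlocks; idx--) {
        blocks.set(idx, new Block(blocks.size() - idx, 0, 0));
      }
      return new BlockListAsLongs(blocks, null).getBlockListAsLongs();
    }
  }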

Examples of org.apache.hadoop.hdfs.protocol.BlockListAsLongs

  @Override // DatanodeProtocol
  public DatanodeCommand blockReport(DatanodeRegistration nodeReg,
      String poolId, StorageBlockReport[] reports) throws IOException {
    verifyRequest(nodeReg);
    BlockListAsLongs blist = new BlockListAsLongs(reports[0].getBlocks());
    if(stateChangeLog.isDebugEnabled()) {
      stateChangeLog.debug("*BLOCK* NameNode.blockReport: "
           + "from " + nodeReg + " " + blist.getNumberOfBlocks()
           + " blocks");
    }

    namesystem.getBlockManager().processReport(nodeReg, poolId, blist);
    if (nn.getFSImage().isUpgradeFinalized() && !nn.isStandbyState())
