Examples of BlockCommand


Examples of org.apache.hadoop.hdfs.server.protocol.BlockCommand

  }

  BlockCommand getLeaseRecoveryCommand(int maxTransfers) {
    List<BlockTargetPair> blocktargetlist = recoverBlocks.poll(maxTransfers);
    return blocktargetlist == null? null:
        new BlockCommand(DatanodeProtocol.DNA_RECOVERBLOCK, blocktargetlist);
  }
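A minimal consumer-side sketch of the command built above. It assumes only the two-argument constructor and the getBlocks()/getTargets() accessors used elsewhere on this page, and that getTargets() yields one DatanodeInfo[] per block; recoverBlock is a placeholder, not HDFS API.

  // Sketch only: walk the block/target pairs carried by a DNA_RECOVERBLOCK command.
  void handleRecoveryCommand(BlockCommand cmd) throws IOException {
    if (cmd == null) {
      return;                                    // nothing queued for recovery
    }
    Block[] blocks = cmd.getBlocks();
    DatanodeInfo[][] targets = cmd.getTargets(); // one target array per block (assumption)
    for (int i = 0; i < blocks.length; i++) {
      recoverBlock(blocks[i], targets[i]);       // placeholder helper
    }
  }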

Examples of org.apache.hadoop.hdfs.server.protocol.BlockCommand

   * Remove the specified number of blocks to be invalidated
   */
  BlockCommand getInvalidateBlocks(int maxblocks) {
    Block[] deleteList = getBlockArray(invalidateBlocks, maxblocks);
    return deleteList == null?
        null: new BlockCommand(DatanodeProtocol.DNA_INVALIDATE, deleteList);
  }
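For context, a sketch contrasting the pre-federation two-argument construction used in this snippet with the block-pool-aware form that the federated/HA snippets further down the page rely on. The two forms come from different Hadoop lines and would not compile against a single release; the pool id and Block contents are placeholders.

  // Pre-federation form (this snippet): action + blocks to invalidate.
  BlockCommand legacy = new BlockCommand(DatanodeProtocol.DNA_INVALIDATE,
      new Block[] { new Block() });

  // Federation/HA form (later snippets): action + block pool id + blocks.
  BlockCommand perPool = new BlockCommand(DatanodeProtocol.DNA_INVALIDATE,
      "BP-example",                              // placeholder block pool id
      new Block[] { new Block() });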

Examples of org.apache.hadoop.hdfs.server.protocol.BlockCommand

  public void testIgnoreDeletionsFromNonActive() throws Exception {
    BPOfferService bpos = setupBPOSForNNs(mockNN1, mockNN2);

    // Ask to invalidate FAKE_BLOCK when block report hits the
    // standby
    Mockito.doReturn(new BlockCommand(DatanodeProtocol.DNA_INVALIDATE,
        FAKE_BPID, new Block[] { FAKE_BLOCK.getLocalBlock() }))
        .when(mockNN2).blockReport(
            Mockito.<DatanodeRegistration>anyObject(),
            Mockito.eq(FAKE_BPID),
            Mockito.<StorageBlockReport[]>anyObject());

Examples of org.apache.hadoop.hdfs.server.protocol.BlockCommand

    * @throws IOException
    */
   private boolean processCommand(DatanodeCommand cmd) throws IOException {
     if (cmd == null)
       return true;
     final BlockCommand bcmd = cmd instanceof BlockCommand? (BlockCommand)cmd: null;

     boolean retValue = true;
     long startTime = System.currentTimeMillis();

     switch(cmd.getAction()) {
     case DatanodeProtocol.DNA_TRANSFER:
       // Send a copy of a block to another datanode
       transferBlocks(namespaceId,
           bcmd.getBlocks(), bcmd.getTargets());
       myMetrics.blocksReplicated.inc(bcmd.getBlocks().length);
       break;
     case DatanodeProtocol.DNA_INVALIDATE:
       //
       // Some local block(s) are obsolete and can be
       // safely garbage-collected.
       //
       Block toDelete[] = bcmd.getBlocks();
       try {
         if (blockScanner != null) {
           blockScanner.deleteBlocks(namespaceId, toDelete);
         }       
         data.invalidate(namespaceId, toDelete);
       } catch(IOException e) {
         checkDiskError();
         throw e;
       }
       myMetrics.blocksRemoved.inc(toDelete.length);
       break;
     case DatanodeProtocol.DNA_SHUTDOWN:
       // shut down the data node
       shouldServiceRun = false;
       retValue = false;
       break;
     case DatanodeProtocol.DNA_REGISTER:
       // namenode requested a registration - at start or if NN lost contact
       LOG.info("DatanodeCommand action: DNA_REGISTER");
       if (shouldRun) {
         register();
         firstBlockReportSent = false;
       }
       break;
     case DatanodeProtocol.DNA_FINALIZE:
       storage.finalizedUpgrade(namespaceId);
       break;
     case UpgradeCommand.UC_ACTION_START_UPGRADE:
       // start distributed upgrade here
       processDistributedUpgradeCommand((UpgradeCommand)cmd);
       break;
     case DatanodeProtocol.DNA_RECOVERBLOCK:
       recoverBlocks(namespaceId, bcmd.getBlocks(), bcmd.getTargets());
       break;
     default:
       LOG.warn("Unknown DatanodeCommand action: " + cmd.getAction());
     }
     long endTime = System.currentTimeMillis();
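One subtlety in this dispatcher: bcmd is null whenever the incoming command is not a BlockCommand, yet the DNA_TRANSFER, DNA_INVALIDATE and DNA_RECOVERBLOCK branches dereference it unconditionally. A defensive sketch (not the project's code) that makes that assumption explicit:

     // Sketch only: guard the block-carrying actions before touching bcmd.
     final BlockCommand bcmd = cmd instanceof BlockCommand ? (BlockCommand) cmd : null;
     switch (cmd.getAction()) {
     case DatanodeProtocol.DNA_TRANSFER:
     case DatanodeProtocol.DNA_INVALIDATE:
     case DatanodeProtocol.DNA_RECOVERBLOCK:
       if (bcmd == null) {
         throw new IOException("expected a BlockCommand for action " + cmd.getAction());
       }
       break;
     default:
       break;                                    // remaining actions carry no block payload
     }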

Examples of org.apache.hadoop.hdfs.server.protocol.BlockCommand

      break;
    case SHUTDOWN:
      action = DatanodeProtocol.DNA_SHUTDOWN;
      break;
    }
    return new BlockCommand(action, blkCmd.getBlockPoolId(), blocks, targets);
  }
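The snippet above is cut off just after the SHUTDOWN case; a sketch of the action mapping it presumably performs. Only the SHUTDOWN case is visible above, so the TRANSFER and INVALIDATE cases (and the DNA_UNKNOWN default) are assumptions:

    // Sketch: map the protobuf action onto the DatanodeProtocol DNA_* constants.
    int action = DatanodeProtocol.DNA_UNKNOWN;   // assumed default
    switch (blkCmd.getAction()) {
    case TRANSFER:
      action = DatanodeProtocol.DNA_TRANSFER;
      break;
    case INVALIDATE:
      action = DatanodeProtocol.DNA_INVALIDATE;
      break;
    case SHUTDOWN:
      action = DatanodeProtocol.DNA_SHUTDOWN;
      break;
    }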

Examples of org.apache.hadoop.hdfs.server.protocol.BlockCommand

        final List<DatanodeCommand> cmds = new ArrayList<DatanodeCommand>();
        //check pending replication
        List<BlockTargetPair> pendingList = nodeinfo.getReplicationCommand(
              maxTransfers);
        if (pendingList != null) {
          cmds.add(new BlockCommand(DatanodeProtocol.DNA_TRANSFER, blockPoolId,
              pendingList));
        }
        //check block invalidation
        Block[] blks = nodeinfo.getInvalidateBlocks(blockInvalidateLimit);
        if (blks != null) {
          cmds.add(new BlockCommand(DatanodeProtocol.DNA_INVALIDATE,
              blockPoolId, blks));
        }
       
        blockManager.addKeyUpdateCommand(cmds, nodeinfo);
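The heartbeat-handling snippet stops before the accumulated list is used; a plausible tail, assuming the enclosing method returns a DatanodeCommand[] in the heartbeat response:

        // Sketch only: hand the accumulated commands back in the heartbeat reply.
        return cmds.isEmpty() ? null : cmds.toArray(new DatanodeCommand[cmds.size()]);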

Examples of org.apache.hadoop.hdfs.server.protocol.BlockCommand

   */
  private boolean processCommandFromActive(DatanodeCommand cmd,
      BPServiceActor actor) throws IOException {
    if (cmd == null)
      return true;
    final BlockCommand bcmd =
      cmd instanceof BlockCommand? (BlockCommand)cmd: null;

    switch(cmd.getAction()) {
    case DatanodeProtocol.DNA_TRANSFER:
      // Send a copy of a block to another datanode
      dn.transferBlocks(bcmd.getBlockPoolId(), bcmd.getBlocks(), bcmd.getTargets());
      dn.metrics.incrBlocksReplicated(bcmd.getBlocks().length);
      break;
    case DatanodeProtocol.DNA_INVALIDATE:
      //
      // Some local block(s) are obsolete and can be
      // safely garbage-collected.
      //
      Block toDelete[] = bcmd.getBlocks();
      try {
        if (dn.blockScanner != null) {
          dn.blockScanner.deleteBlocks(bcmd.getBlockPoolId(), toDelete);
        }
        // using global fsdataset
        dn.getFSDataset().invalidate(bcmd.getBlockPoolId(), toDelete);
      } catch(IOException e) {
        dn.checkDiskError();
        throw e;
      }
      dn.metrics.incrBlocksRemoved(toDelete.length);

Examples of org.apache.hadoop.hdfs.server.protocol.BlockCommand

      DatanodeCommand cmd = nameNode.sendHeartbeat(
          dnRegistration, DF_CAPACITY, DF_USED, DF_CAPACITY - DF_USED, 0, 0);
      if(cmd == null || cmd.getAction() != DatanodeProtocol.DNA_TRANSFER)
        return 0;
      // Send a copy of a block to another datanode
      BlockCommand bcmd = (BlockCommand)cmd;
      return transferBlocks(bcmd.getBlocks(), bcmd.getTargets());
    }
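The transferBlocks helper invoked on the last line is not shown; a hypothetical stand-in that merely counts the requested transfers, again assuming getTargets() yields one DatanodeInfo[] per block:

      // Hypothetical helper, not the benchmark's code: count (block, target) pairs.
      private int transferBlocks(Block[] blocks, DatanodeInfo[][] targets) {
        int count = 0;
        for (int i = 0; i < blocks.length; i++) {
          count += targets[i].length;             // one transfer per replica target
        }
        return count;
      }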