Package org.apache.hadoop.hdfs.server.protocol

Examples of org.apache.hadoop.hdfs.server.protocol.DatanodeCommand
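DatanodeCommand is the base class for the commands a NameNode hands back to a DataNode in response to heartbeats, block reports and (in later releases) cache reports. Each command carries an action code (the DNA_* constants in DatanodeProtocol), and the DataNode dispatches on that code to carry out the requested work, for example transferring replicas to other nodes or invalidating local blocks. The snippets below show both sides of the exchange: DataNode code that sends reports and processes the returned commands, NameNode code that produces them, and the protobuf translator that moves them across the wire.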


            // Create the block report from the local dataset
            long brCreateStartTime = now();
            Block[] bReport = data.retrieveAsyncBlockReport();
           
            // Send block report
            long brSendStartTime = now();
            DatanodeCommand cmd = namenode.blockReport(dnRegistration,
                    BlockListAsLongs.convertToArrayLongs(bReport));
           
            // Log the block report processing stats from Datanode perspective
            long brSendCost = now() - brSendStartTime;
            long brCreateCost = brSendStartTime - brCreateStartTime;
View Full Code Here
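In the full method these two costs are typically logged and the returned command handed to the DataNode's command dispatcher. A sketch of that continuation (close to the upstream code, but not guaranteed to match any particular release):

            // Sketch: report how long the block report took to build and to
            // send, then act on whatever the NameNode asked for in reply.
            LOG.info("BlockReport of " + bReport.length + " blocks took "
                + brCreateCost + " msec to generate and "
                + brSendCost + " msecs for RPC and NN processing");
            processCommand(cmd);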


   * @throws IOException
   */
  DatanodeCommand[] handleHeartbeat(DatanodeRegistration nodeReg,
      long capacity, long dfsUsed, long remaining,
      int xceiverCount, int xmitsInProgress) throws IOException {
    DatanodeCommand cmd = null;
    synchronized (heartbeats) {
      synchronized (datanodeMap) {
        DatanodeDescriptor nodeinfo = null;
        try {
          nodeinfo = getDatanode(nodeReg);
View Full Code Here
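The truncated body goes on to update the node's usage statistics and then turns any pending work for this DataNode into commands for the reply. A sketch of that tail, assuming pre-federation helper methods on DatanodeDescriptor (the helper and field names here are illustrative and vary between releases):

        // Sketch only: ask the DatanodeDescriptor for pending work and wrap
        // it in DatanodeCommands for the reply; helper names are illustrative.
        // 1. replicas this node should transfer to other DataNodes
        cmd = nodeinfo.getReplicationCommand(
            maxReplicationStreams - xmitsInProgress);
        if (cmd != null) {
          return new DatanodeCommand[] {cmd};
        }
        // 2. blocks this node should delete from local storage
        cmd = nodeinfo.getInvalidateBlocks(blockInvalidateLimit);
        if (cmd != null) {
          return new DatanodeCommand[] {cmd};
        }
        return null;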


            continue;
        }
           
        reportReceivedBlocks();

        DatanodeCommand cmd = blockReport();
        processCommand(cmd);

        // start block scanner
        if (blockScanner != null && blockScannerThread == null &&
            upgradeManager.isUpgradeCompleted()) {
View Full Code Here
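processCommand(cmd) is where the returned DatanodeCommand actually takes effect. A minimal sketch of that dispatch, using the DNA_* action codes from DatanodeProtocol (the real method handles more cases and the details differ between releases):

  // Sketch of a command dispatcher; returns false when the DataNode
  // should stop its service loop (e.g. on a shutdown command).
  private boolean processCommand(DatanodeCommand cmd) throws IOException {
    if (cmd == null) {
      return true;
    }
    switch (cmd.getAction()) {
    case DatanodeProtocol.DNA_TRANSFER:
      // replicate the blocks named in the BlockCommand to its targets
      break;
    case DatanodeProtocol.DNA_INVALIDATE:
      // delete the listed blocks from local storage
      break;
    case DatanodeProtocol.DNA_REGISTER:
      // the NameNode asked this DataNode to re-register
      break;
    case DatanodeProtocol.DNA_SHUTDOWN:
      return false;
    default:
      LOG.warn("Unknown DatanodeCommand action: " + cmd.getAction());
    }
    return true;
  }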

   * Report the list of blocks to the Namenode
   * @throws IOException
   */
  private DatanodeCommand blockReport() throws IOException {
    // send block report
    DatanodeCommand cmd = null;
    long startTime = now();
    if (startTime - lastBlockReport > blockReportInterval) {
      //
      // Send latest block report if timer has expired.
      // Get back a list of local block(s) that are obsolete
View Full Code Here
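The elided body follows the same pattern as the first snippet on this page: build the report from the local dataset, send it, and remember when it was sent. Roughly (a sketch; the exact FSDataset call differs between releases):

      // Sketch of the elided body: create the report, send it, record the time.
      Block[] bReport = data.getBlockReport();
      cmd = namenode.blockReport(dnRegistration,
          BlockListAsLongs.convertToArrayLongs(bReport));
      lastBlockReport = startTime;
    }
    return cmd;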

    // The block with the modified GS won't be found and has to be deleted
    blocks.get(0).setGenerationStamp(rand.nextLong());
    // This new block is unknown to the NN and will be marked for deletion.
    blocks.add(new Block());
    DatanodeCommand dnCmd =
      cluster.getNameNode().blockReport(
        cluster.getDataNodes().get(DN_N0).dnRegistration,
        new BlockListAsLongs(blocks, null).getBlockListAsLongs());
    LOG.debug("Got the command: " + dnCmd);
    printStats();
View Full Code Here
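A test along these lines would typically go on to verify that the NameNode scheduled the bogus replicas for removal. An illustrative check (not the exact assertion from the Hadoop test) might look like:

    // Illustrative only: after processing the report, the unrecognized
    // replicas should be queued for deletion on the NameNode side.
    assertTrue("Expected some blocks to be scheduled for deletion",
        cluster.getNamesystem().getPendingDeletionBlocks() > 0);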

   * Report the list of blocks to the Namenode
   * @throws IOException
   */
  DatanodeCommand blockReport() throws IOException {
    // send block report if timer has expired.
    DatanodeCommand cmd = null;
    long startTime = now();
    if (startTime - lastBlockReport > dnConf.blockReportInterval) {

      // Flush any block information that precedes the block report. Otherwise
      // we have a chance that we will miss the delHint information
View Full Code Here
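The cut-off comment is about ordering: the incremental (received/deleted) report has to be flushed before the full report so the delHint information is not lost. The next lines of the method are essentially (a sketch):

      // Sketch: flush the incremental report before the full one so the
      // NameNode sees deletion hints in the right order.
      reportReceivedDeletedBlocks();
      lastDeletedReport = startTime;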

    // If caching is disabled, do not send a cache report
    if (dn.getFSDataset().getCacheCapacity() == 0) {
      return null;
    }
    // send cache report if timer has expired.
    DatanodeCommand cmd = null;
    long startTime = Time.monotonicNow();
    if (startTime - lastCacheReport > dnConf.cacheReportInterval) {
      if (LOG.isDebugEnabled()) {
        LOG.debug("Sending cacheReport from service actor: " + this);
      }
View Full Code Here
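The remainder of the method gathers the IDs of cached blocks for this block pool and ships them to the NameNode, which may answer with cache or uncache work. A sketch based on the 2.x BPServiceActor (the bpos, bpNamenode and bpRegistration field names are assumed from that class and may differ):

      // Sketch: collect the cached block ids for this block pool and send
      // them; the NameNode's reply, if any, is another DatanodeCommand.
      lastCacheReport = startTime;
      String bpid = bpos.getBlockPoolId();
      List<Long> blockIds = dn.getFSDataset().getCacheReport(bpid);
      cmd = bpNamenode.cacheReport(bpRegistration, bpid, blockIds);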

            || (startTime - lastDeletedReport > dnConf.deleteReportInterval)) {
          reportReceivedDeletedBlocks();
          lastDeletedReport = startTime;
        }

        DatanodeCommand cmd = blockReport();
        processCommand(new DatanodeCommand[]{ cmd });

        cmd = cacheReport();
        processCommand(new DatanodeCommand[]{ cmd });
View Full Code Here

  }

  @Override
  public BlockReportResponseProto blockReport(RpcController controller,
      BlockReportRequestProto request) throws ServiceException {
    DatanodeCommand cmd = null;
    StorageBlockReport[] report =
        new StorageBlockReport[request.getReportsCount()];
   
    int index = 0;
    for (StorageBlockReportProto s : request.getReportsList()) {
View Full Code Here
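After the loop has converted the per-storage reports, the translator delegates to the underlying DatanodeProtocol implementation and wraps any returned command in the protobuf response. The tail of the method is roughly (a sketch; the impl field and PBHelper conversions are assumed from the 2.x server-side translator):

    // Sketch of the method's tail: delegate to the real implementation and
    // convert the returned DatanodeCommand, if any, into the proto response.
    try {
      cmd = impl.blockReport(PBHelper.convert(request.getRegistration()),
          request.getBlockPoolId(), report);
    } catch (IOException e) {
      throw new ServiceException(e);
    }
    BlockReportResponseProto.Builder builder =
        BlockReportResponseProto.newBuilder();
    if (cmd != null) {
      builder.setCmd(PBHelper.convert(cmd));
    }
    return builder.build();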
