Package org.apache.hadoop.hdfs.server.protocol

Examples of org.apache.hadoop.hdfs.server.protocol.DatanodeProtocol
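The excerpts below show DatanodeProtocol from the DataNode side: reporting disk errors and corrupt blocks to the NameNode, obtaining the protocol proxy over RPC, and spying on or stubbing the protocol in block-report and block-recovery tests. As a minimal sketch of the core pattern they share (the RPC call and error-code constants appear in the excerpts; nnAddr, conf and dnReg are placeholders for values a real DataNode already holds, and exact method signatures vary between Hadoop versions):

  // Hedged sketch assembled from the excerpts below, not a complete DataNode.
  DatanodeProtocol namenode =
      (DatanodeProtocol) RPC.waitForProxy(DatanodeProtocol.class,
          DatanodeProtocol.versionID, nnAddr, conf);
  try {
    // DISK_ERROR: the DataNode keeps running with fewer volumes;
    // FATAL_DISK_ERROR: it is shutting down and the NameNode should remove it.
    namenode.errorReport(dnReg, DatanodeProtocol.DISK_ERROR,
        "disk error on a data directory");
  } catch (IOException ignored) {
    // Best effort: failing to reach the NameNode should not crash the DataNode.
  }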


 
  private void handleDiskError(String errMsgr) throws IOException {
    boolean hasEnoughResource = data.hasEnoughResource();
    myMetrics.volumeFailures.inc();
    for (Integer namespaceId : namespaceManager.getAllNamespaces()) {
      DatanodeProtocol nn = getNSNamenode(namespaceId);
      LOG.warn("DataNode.handleDiskError: Keep Running: " + hasEnoughResource);
     
      // If hasEnoughResource is true, more volumes are still available, so we
      // don't want to shut down the DN completely and don't want the NN to
      // remove it.
      int dp_error = DatanodeProtocol.DISK_ERROR;
      if (!hasEnoughResource) {
        // The DN will be shut down and the NN should remove it.
        dp_error = DatanodeProtocol.FATAL_DISK_ERROR;
      }
      //inform NameNode
      try {
        nn.errorReport(getDNRegistrationForNS(namespaceId), dp_error, errMsgr);
      } catch(IOException ignored) {             
      }
     
     
      if (hasEnoughResource) {
        // ... remainder of the loop body truncated in this excerpt ...
      }
    }
  }
 
  private void transferBlock(int namespaceId, Block block,
      DatanodeInfo xferTargets[]) throws IOException {
    DatanodeProtocol nn = getNSNamenode(namespaceId);
    DatanodeRegistration nsReg = getDNRegistrationForNS(namespaceId);

    if (!data.isValidBlock(namespaceId, block, true)) {
      // block does not exist or is under-construction
      String errStr = "Can't send invalid block " + block;
      LOG.info(errStr);
      nn.errorReport(nsReg, DatanodeProtocol.INVALID_BLOCK, errStr);
      return;
    }

    // Check if NN recorded length matches on-disk length
    long onDiskLength = data.getFinalizedBlockLength(namespaceId, block);
    if (block.getNumBytes() > onDiskLength) {
      // A shorter on-disk length indicates corruption, so report the corrupt block to the NN.
      nn.reportBadBlocks(new LocatedBlock[] { new LocatedBlock(block,
          new DatanodeInfo[] { new DatanodeInfo(nsReg) }) });
      LOG.info("Can't replicate block " + block + " because on-disk length "
          + onDiskLength + " is shorter than NameNode recorded length "
          + block.getNumBytes());
      return;
    }
    // ... remainder of the method truncated in this excerpt ...
  }
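transferBlock reports two different problems through DatanodeProtocol: an invalid or under-construction block is flagged to the NameNode with errorReport(..., INVALID_BLOCK, ...), while a finalized block whose on-disk length is shorter than the length the NameNode recorded is reported through reportBadBlocks, with this DataNode itself listed as the corrupt location.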

    void setupNS(Configuration conf, AbstractList<File> dataDirs)
    throws IOException {
      // get NN proxy
      DatanodeProtocol dnp =
        (DatanodeProtocol)RPC.waitForProxy(DatanodeProtocol.class,
            DatanodeProtocol.versionID, nnAddr, conf);
      setNameNode(dnp);

      // Handshake with the NN (remainder of the method truncated in this excerpt).
    }
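The handshake cut off above normally asks the NameNode for its namespace and version information and then registers the DataNode. A rough sketch of that step, reusing the dnp proxy from the snippet (dnRegistration is a placeholder for the DatanodeRegistration this service keeps, and the registration call is named register or registerDatanode depending on the Hadoop version):

      // Hedged sketch of the handshake step; names and exact signatures vary by version.
      NamespaceInfo nsInfo = dnp.versionRequest();    // ask the NN for namespace/layout info
      // ... verify layout and build versions against the local storage directories ...
      dnRegistration = dnp.register(dnRegistration);  // or registerDatanode(...) in later versions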

      out.hflush();

      // Set up a spy so that we can delay the block report coming
      // from this node.
      DataNode dn = cluster.getDataNodes().get(0);
      DatanodeProtocol spy =
        DataNodeTestUtils.spyOnBposToNN(dn, nn);
     
      Mockito.doAnswer(delayer)
        .when(spy).blockReport(
          Mockito.<DatanodeRegistration>anyObject(),
          // ... remaining blockReport matcher arguments truncated in this excerpt ...
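The delayer passed to doAnswer above is what actually stalls the block report until the test is ready. The real tests use a Hadoop test helper for this; a generic sketch of the same Mockito pattern (assuming java.util.concurrent.CountDownLatch and org.mockito.stubbing.Answer, with hypothetical latch names) looks like:

      // Generic sketch of the delay pattern, not the Hadoop helper itself.
      CountDownLatch callArrived = new CountDownLatch(1);
      CountDownLatch release = new CountDownLatch(1);
      Answer<Object> delayer = invocation -> {
        callArrived.countDown();             // tell the test the block report arrived
        release.await();                     // hold the report until the test releases it
        return invocation.callRealMethod();  // then forward to the real NameNode proxy
      };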

    doReturn(new ReplicaRecoveryInfo(block.getBlockId(), 0,
        block.getGenerationStamp(), ReplicaState.FINALIZED)).when(spyDN).
        initReplicaRecovery(any(RecoveringBlock.class));
    Daemon d = spyDN.recoverBlocks("fake NN", initRecoveringBlocks());
    d.join();
    DatanodeProtocol dnP = dn.getActiveNamenodeForBP(POOL_ID);
    verify(dnP).commitBlockSynchronization(
        block, RECOVERY_ID, 0, true, true, DatanodeID.EMPTY_ARRAY, null);
  }
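This test stubs initReplicaRecovery on a spied DataNode so recovery produces a finalized, zero-length replica, runs the recovery daemon to completion, and then verifies that the DataNode asked the active NameNode to commit the block synchronization with the expected recovery ID and a new length of 0.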

      dn.syncBlock(rBlock, initBlockRecords(dn));
      fail("Sync should fail");
    } catch (IOException e) {
      assertTrue(e.getMessage().startsWith("Cannot recover "));
    }
    DatanodeProtocol namenode = dn.getActiveNamenodeForBP(POOL_ID);
    verify(namenode, never()).commitBlockSynchronization(
        any(ExtendedBlock.class), anyLong(), anyLong(), anyBoolean(),
        anyBoolean(), any(DatanodeID[].class), any(String[].class));
  }
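This is the negative case: when syncBlock fails with a "Cannot recover" IOException, the test verifies that commitBlockSynchronization was never sent to the NameNode.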

        dn.syncBlock(rBlock, initBlockRecords(dn));
        fail("Sync should fail");
      } catch (IOException e) {
        assertTrue(e.getMessage().startsWith("Cannot recover "));
      }
      DatanodeProtocol namenode = dn.getActiveNamenodeForBP(POOL_ID);
      verify(namenode, never()).commitBlockSynchronization(
          any(ExtendedBlock.class), anyLong(), anyLong(), anyBoolean(),
          anyBoolean(), any(DatanodeID[].class), any(String[].class));
    } finally {
      streams.close();
      // ... remainder of the finally block truncated in this excerpt ...

    doReturn(new ReplicaRecoveryInfo(block.getBlockId(), 0,
        block.getGenerationStamp(), ReplicaState.FINALIZED)).when(spyDN).
        initReplicaRecovery(any(RecoveringBlock.class));
    Daemon d = spyDN.recoverBlocks(initRecoveringBlocks());
    d.join();
    DatanodeProtocol dnP = dn.getActiveNamenodeForBP(POOL_ID);
    verify(dnP).commitBlockSynchronization(
        block, RECOVERY_ID, 0, true, true, DatanodeID.EMPTY_ARRAY, null);
  }

