Package org.apache.hadoop.hdfs.server.protocol

Examples of org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration
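All of the excerpts below either build or consume a DatanodeRegistration, which bundles a DatanodeID (addresses and ports), the node's StorageInfo, its ExportedBlockKeys, and a software version string. As a quick orientation, here is a minimal sketch of constructing one by hand, assuming the Hadoop 2.x constructors that appear in the excerpts; the class name, import paths, storage ID, and port values are illustrative assumptions rather than values taken from the excerpts.

import org.apache.hadoop.hdfs.protocol.DatanodeID;
import org.apache.hadoop.hdfs.security.token.block.ExportedBlockKeys;
import org.apache.hadoop.hdfs.server.common.StorageInfo;
import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration;
import org.apache.hadoop.util.VersionInfo;

public class DatanodeRegistrationSketch {
  // Hypothetical helper: a registration for a local, test-style DataNode.
  public static DatanodeRegistration newLocalRegistration() {
    // DatanodeID takes the IP, hostname, storage ID, and the xfer/info/ipc
    // ports (the same six-argument constructor used in the DataNode excerpt below).
    DatanodeID dnId = new DatanodeID("127.0.0.1", "localhost",
        "DS-example-storage-id", 50010, 50075, 50020);
    // An empty StorageInfo and empty block keys are enough for a test-style
    // registration, mirroring the DFSTestUtil-style helper excerpted below.
    return new DatanodeRegistration(dnId, new StorageInfo(),
        new ExportedBlockKeys(), VersionInfo.getVersion());
  }
}

The same four pieces show up in every excerpt, whether the registration is created by a test helper, assembled by the DataNode itself, or fetched from a running DataNode with getDNRegistrationForBP.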


    boolean checked = false;
    // Find the datanode that has the block according to locations
    // and check that the URL was redirected to this DN's info port
    for (DataNode node : cluster.getDataNodes()) {
      DatanodeRegistration dnR =
        DataNodeTestUtils.getDNRegistrationForBP(node, blockPoolId);
      if (dnR.getXferAddr().equals(xferAddr)) {
        checked = true;
        assertEquals(dnR.getInfoPort(), conn.getURL().getPort());
      }
    }
    assertTrue("The test never checked that the location of " +
               "the block and the hftp destination are the same", checked);
  }


        DFSConfigKeys.DFS_DATANODE_IPC_DEFAULT_PORT);
    return new DatanodeDescriptor(dnId, rackLocation);
  }
 
  // Test helper: wrap a local DatanodeID in a registration with an empty
  // StorageInfo, empty block keys, and the current software version.
  public static DatanodeRegistration getLocalDatanodeRegistration() {
    return new DatanodeRegistration(getLocalDatanodeID(),
        new StorageInfo(), new ExportedBlockKeys(), VersionInfo.getVersion());
  }
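In the cluster tests below, the registration is not built by a local helper like this one; it is taken from a live DataNode with DataNodeTestUtils.getDNRegistrationForBP and then resolved to its NameNode-side DatanodeDescriptor via NameNodeAdapter.getDatanode.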

      // Look up the HeartbeatManager and map the first DataNode's registration
      // for the block pool to its NameNode-side DatanodeDescriptor.
      cluster.waitActive();
      final FSNamesystem namesystem = cluster.getNamesystem();
      final HeartbeatManager hm = namesystem.getBlockManager(
          ).getDatanodeManager().getHeartbeatManager();
      final String poolId = namesystem.getBlockPoolId();
      final DatanodeRegistration nodeReg =
        DataNodeTestUtils.getDNRegistrationForBP(cluster.getDataNodes().get(0), poolId);
      final DatanodeDescriptor dd = NameNodeAdapter.getDatanode(namesystem, nodeReg);

      final int REMAINING_BLOCKS = 1;
      final int MAX_REPLICATE_LIMIT =

      // Same lookup for three DataNodes: each registration is resolved to its
      // DatanodeDescriptor before work proceeds under the namesystem write lock.
      cluster.waitActive();
      final FSNamesystem namesystem = cluster.getNamesystem();
      final HeartbeatManager hm = namesystem.getBlockManager(
          ).getDatanodeManager().getHeartbeatManager();
      final String poolId = namesystem.getBlockPoolId();
      final DatanodeRegistration nodeReg1 =
        DataNodeTestUtils.getDNRegistrationForBP(cluster.getDataNodes().get(0), poolId);
      final DatanodeDescriptor dd1 = NameNodeAdapter.getDatanode(namesystem, nodeReg1);
      final DatanodeRegistration nodeReg2 =
        DataNodeTestUtils.getDNRegistrationForBP(cluster.getDataNodes().get(1), poolId);
      final DatanodeDescriptor dd2 = NameNodeAdapter.getDatanode(namesystem, nodeReg2);
      final DatanodeRegistration nodeReg3 =
        DataNodeTestUtils.getDNRegistrationForBP(cluster.getDataNodes().get(2), poolId);
      final DatanodeDescriptor dd3 = NameNodeAdapter.getDatanode(namesystem, nodeReg3);

      try {
        namesystem.writeLock();

      storageInfo = new StorageInfo(nsInfo);
    }
    // Build the DatanodeID from the streaming address, storage ID, and the
    // configured xfer/info/ipc ports, then wrap it into the registration
    // together with the storage info, fresh block keys, and the version string.
    DatanodeID dnId = new DatanodeID(
        streamingAddr.getAddress().getHostAddress(), hostName,
        getStorageId(), getXferPort(), getInfoPort(), getIpcPort());
    return new DatanodeRegistration(dnId, storageInfo,
        new ExportedBlockKeys(), VersionInfo.getVersion());
  }

  }
   
  // Look up this DataNode's registration for the block's pool before
  // transferring the block to the given targets.
  private void transferBlock(ExtendedBlock block, DatanodeInfo xferTargets[])
      throws IOException {
    BPOfferService bpos = getBPOSForBlock(block);
    DatanodeRegistration bpReg = getDNRegistrationForBP(block.getBlockPoolId());
   
    if (!data.isValidBlock(block)) {
      // block does not exist or is under-construction
      String errStr = "Can't send invalid block " + block;
      LOG.info(errStr);

    // Check generation stamps: when a target DatanodeID matches this node's own
    // registration for the pool, run replica recovery locally; otherwise go
    // through an inter-datanode protocol proxy.
    for(DatanodeID id : datanodeids) {
      try {
        BPOfferService bpos = blockPoolManager.get(block.getBlockPoolId());
        DatanodeRegistration bpReg = bpos.bpRegistration;
        InterDatanodeProtocol datanode = bpReg.equals(id)?
            this: DataNode.createInterDataNodeProtocolProxy(id, getConf(),
                dnConf.socketTimeout, dnConf.connectToDnViaHostname);
        ReplicaRecoveryInfo info = callInitReplicaRecovery(datanode, rBlock);
        if (info != null &&
            info.getGenerationStamp() >= block.getGenerationStamp() &&

    triggerFailure(filename, filesize);
   
    // Make sure a block report is sent: key it by this DataNode's registration
    // and storage ID, then push it to the NameNode over RPC.
    DataNode dn = cluster.getDataNodes().get(1); //corresponds to dir data3
    String bpid = cluster.getNamesystem().getBlockPoolId();
    DatanodeRegistration dnR = dn.getDNRegistrationForBP(bpid);
    final StorageBlockReport[] report = {
        new StorageBlockReport(
            new DatanodeStorage(dnR.getStorageID()),
            DataNodeTestUtils.getFSDataset(dn).getBlockReport(bpid
                ).getBlockListAsLongs())
    };
    cluster.getNameNodeRpc().blockReport(dnR, bpid, report);
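The registration does double duty here: its storage ID keys the DatanodeStorage inside the StorageBlockReport, and the registration object itself identifies to the NameNode which DataNode the report came from.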

        // Pass the short-circuit read file descriptors over the domain socket;
        // the finally block logs a client-trace line tagged with this
        // DataNode's registration (its storage ID).
        peer.getDomainSocket().
          sendFileDescriptors(fds, buf, 0, buf.length);
      }
    } finally {
      if (ClientTraceLog.isInfoEnabled()) {
        DatanodeRegistration dnR = datanode.getDNRegistrationForBP(blk
            .getBlockPoolId());
        BlockSender.ClientTraceLog.info(String.format(
            "src: 127.0.0.1, dest: 127.0.0.1, op: REQUEST_SHORT_CIRCUIT_FDS," +
            " blockid: %s, srvID: %s, success: %b",
            blk.getBlockId(), dnR.getStorageID(), (fis != null)
          ));
      }
      if (fis != null) {
        IOUtils.cleanup(LOG, fis);
      }

    // Verify the block token, then use the registration's storage ID when
    // formatting the client-trace line logged as the block is served.
    checkAccess(out, true, block, blockToken,
        Op.READ_BLOCK, BlockTokenSecretManager.AccessMode.READ);
 
    // send the block
    BlockSender blockSender = null;
    DatanodeRegistration dnR =
      datanode.getDNRegistrationForBP(block.getBlockPoolId());
    final String clientTraceFmt =
      clientName.length() > 0 && ClientTraceLog.isInfoEnabled()
        ? String.format(DN_CLIENTTRACE_FORMAT, localAddress, remoteAddress,
            "%d", "HDFS_READ", clientName, "%d",
            dnR.getStorageID(), block, "%d")
        : dnR + " Served block " + block + " to " +
            remoteAddress;

    updateCurrentThreadName("Sending block " + block);
    try {
