Package org.apache.hadoop.hdfs.server.protocol

Examples of org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration
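
The excerpts below are drawn from Hadoop HDFS test and server code. For orientation only, here is a hypothetical sketch of the constructor pattern the excerpts rely on; every value is invented, and the argument shapes simply mirror the calls that appear in the examples that follow (the real tests obtain their IDs and registrations from a running MiniDFSCluster):

      // Hypothetical illustration only: invented address, ports and UUID.
      // The constructor arguments mirror the calls in the excerpts below.
      DatanodeID dnId = new DatanodeID("127.0.0.1", "localhost",
          "fake-datanode-uuid", 9866, 9864, 9865, 9867);
      DatanodeRegistration reg = new DatanodeRegistration(dnId,
          new StorageInfo(NodeType.DATA_NODE), new ExportedBlockKeys(),
          VersionInfo.getVersion());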


      cluster.waitActive();
      final FSNamesystem namesystem = cluster.getNamesystem();
      final HeartbeatManager hm = namesystem.getBlockManager(
          ).getDatanodeManager().getHeartbeatManager();
      final String poolId = namesystem.getBlockPoolId();
      final DatanodeRegistration nodeReg =
        DataNodeTestUtils.getDNRegistrationForBP(cluster.getDataNodes().get(0), poolId);
      final DatanodeDescriptor dd = NameNodeAdapter.getDatanode(namesystem, nodeReg);
      // Attach a freshly generated storage to the NameNode-side descriptor.
      final String storageID = DatanodeStorage.generateUuid();
      dd.updateStorage(new DatanodeStorage(storageID));
View Full Code Here


      cluster.waitActive();
      final FSNamesystem namesystem = cluster.getNamesystem();
      final HeartbeatManager hm = namesystem.getBlockManager(
          ).getDatanodeManager().getHeartbeatManager();
      final String poolId = namesystem.getBlockPoolId();
      // Resolve registrations and NameNode-side descriptors for three
      // datanodes in the block pool and give each a distinct storage.
      final DatanodeRegistration nodeReg1 =
        DataNodeTestUtils.getDNRegistrationForBP(cluster.getDataNodes().get(0), poolId);
      final DatanodeDescriptor dd1 = NameNodeAdapter.getDatanode(namesystem, nodeReg1);
      dd1.updateStorage(new DatanodeStorage(DatanodeStorage.generateUuid()));
      final DatanodeRegistration nodeReg2 =
        DataNodeTestUtils.getDNRegistrationForBP(cluster.getDataNodes().get(1), poolId);
      final DatanodeDescriptor dd2 = NameNodeAdapter.getDatanode(namesystem, nodeReg2);
      dd2.updateStorage(new DatanodeStorage(DatanodeStorage.generateUuid()));
      final DatanodeRegistration nodeReg3 =
        DataNodeTestUtils.getDNRegistrationForBP(cluster.getDataNodes().get(2), poolId);
      final DatanodeDescriptor dd3 = NameNodeAdapter.getDatanode(namesystem, nodeReg3);
      dd3.updateStorage(new DatanodeStorage(DatanodeStorage.generateUuid()));

      try {
View Full Code Here

    dnrList = new ArrayList<DatanodeRegistration>();
    dnManager = namenode.getNamesystem().getBlockManager().getDatanodeManager();

    // Register six datanodes with the DatanodeManager.
    for (int i=0; i < 6; i++) {
      DatanodeRegistration dnr = new DatanodeRegistration(dataNodes[i],
          new StorageInfo(NodeType.DATA_NODE), new ExportedBlockKeys(),
          VersionInfo.getVersion());
      dnrList.add(dnr);
      dnManager.registerDatanode(dnr);
      dataNodes[i].getStorageInfos()[0].setUtilizationForTesting(
View Full Code Here

      long nnCTime = cluster.getNamesystem().getFSImage().getStorage()
          .getCTime();
      StorageInfo mockStorageInfo = mock(StorageInfo.class);
      doReturn(nnCTime).when(mockStorageInfo).getCTime();
      doReturn(HdfsConstants.DATANODE_LAYOUT_VERSION).when(mockStorageInfo)
          .getLayoutVersion();
      DatanodeRegistration dnReg = new DatanodeRegistration(dnId,
          mockStorageInfo, null, VersionInfo.getVersion());
      rpcServer.registerDatanode(dnReg);

      DatanodeInfo[] report = client.datanodeReport(DatanodeReportType.ALL);
      assertEquals("Expected a registered datanode", 1, report.length);

      // register the same datanode again with a different storage ID
      dnId = new DatanodeID(DN_IP_ADDR, DN_HOSTNAME,
          "changed-fake-datanode-id", DN_XFER_PORT, DN_INFO_PORT,
          DN_INFO_SECURE_PORT, DN_IPC_PORT);
      dnReg = new DatanodeRegistration(dnId,
          mockStorageInfo, null, VersionInfo.getVersion());
      rpcServer.registerDatanode(dnReg);

      report = client.datanodeReport(DatanodeReportType.ALL);
      assertEquals("Datanode with changed storage ID not recognized",
View Full Code Here

     
      long nnCTime = cluster.getNamesystem().getFSImage().getStorage().getCTime();
      StorageInfo mockStorageInfo = mock(StorageInfo.class);
      doReturn(nnCTime).when(mockStorageInfo).getCTime();
     
      DatanodeRegistration mockDnReg = mock(DatanodeRegistration.class);
      doReturn(HdfsConstants.DATANODE_LAYOUT_VERSION).when(mockDnReg).getVersion();
      doReturn("127.0.0.1").when(mockDnReg).getIpAddr();
      doReturn(123).when(mockDnReg).getXferPort();
      doReturn("fake-storage-id").when(mockDnReg).getDatanodeUuid();
      doReturn(mockStorageInfo).when(mockDnReg).getStorageInfo();
View Full Code Here

     
      long nnCTime = cluster.getNamesystem().getFSImage().getStorage().getCTime();
      StorageInfo mockStorageInfo = mock(StorageInfo.class);
      doReturn(nnCTime).when(mockStorageInfo).getCTime();
     
      DatanodeRegistration mockDnReg = mock(DatanodeRegistration.class);
      doReturn(HdfsConstants.DATANODE_LAYOUT_VERSION).when(mockDnReg).getVersion();
      doReturn("fake-storage-id").when(mockDnReg).getDatanodeUuid();
      doReturn(mockStorageInfo).when(mockDnReg).getStorageInfo();
     
      // Should succeed when software versions are the same and CTimes are the same.
View Full Code Here
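
The excerpt above stops mid-comment; tests of this shape typically finish stubbing the mock with a matching software version and hand it to the NameNode RPC server, as in the following sketch (an assumed continuation, reusing the rpcServer from the storage-ID example earlier on this page):

      // Sketch of an assumed continuation: make the mock report the same
      // software version as the NameNode, then register it over RPC as in
      // the earlier example.
      doReturn(VersionInfo.getVersion()).when(mockDnReg).getSoftwareVersion();
      rpcServer.registerDatanode(mockDnReg);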

  }
 
  private void transferBlock(int namespaceId, Block block,
      DatanodeInfo xferTargets[]) throws IOException {
    DatanodeProtocol nn = getNSNamenode(namespaceId);
    DatanodeRegistration nsReg = getDNRegistrationForNS(namespaceId);

    if (!data.isValidBlock(namespaceId, block, true)) {
      // block does not exist or is under-construction
      String errStr = "Can't send invalid block " + block;
      LOG.info(errStr);
View Full Code Here

    // Use the IP address and port number to determine locality. Relying on
    // the DatanodeID of both the target machine and the local machine keeps
    // the comparison uniform.
    String targetMachine = target.getHost();
    int targetPort = target.getPort();
    DatanodeRegistration dnRegistration = getDNRegistrationForNS(srcNamespaceId);
    int localPort = dnRegistration.getPort();
    String localMachine = dnRegistration.getHost();

    Future<Boolean> result;
    // If the target datanode is our datanode itself, then perform local copy.
    if (targetMachine.equals(localMachine) && targetPort == localPort) {
      LOG.info("Performing local block copy since source and "
View Full Code Here

    private ScheduledExecutorService keepAliveSender = null;
    private boolean firstBlockReportSent = false;
    volatile long lastBeingAlive = now();

    NSOfferService(InetSocketAddress isa, String nameserviceId) {
      this.nsRegistration = new DatanodeRegistration(getMachineName());
      this.nnAddr = isa;
      this.nameserviceId = nameserviceId;
    }
View Full Code Here

        .setKeys(PBHelper.convert(registration.getExportedKeys()))
        .setSoftwareVersion(registration.getSoftwareVersion()).build();
  }

  public static DatanodeRegistration convert(DatanodeRegistrationProto proto) {
    return new DatanodeRegistration(PBHelper.convert(proto.getDatanodeID()),
        PBHelper.convert(proto.getStorageInfo()), PBHelper.convert(proto
            .getKeys()), proto.getSoftwareVersion());
  }
View Full Code Here
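
Given the two convert overloads excerpted above (registration to protobuf and back), a registration can be round-tripped through its wire form. A minimal sketch, assuming a DatanodeRegistration named reg built as in the earlier examples:

    // Round-trip sketch: serialize the registration to its protobuf message,
    // then rebuild an equivalent DatanodeRegistration from that message.
    DatanodeRegistrationProto proto = PBHelper.convert(reg);
    DatanodeRegistration roundTripped = PBHelper.convert(proto);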
