Package org.apache.hadoop.hdfs.protocol

Examples of org.apache.hadoop.hdfs.protocol.DatanodeID
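DatanodeID identifies a datanode by its IP address, hostname, datanode UUID, and the ports it exposes: the data-transfer (xfer) port, the HTTP info port, the HTTPS secure-info port, and the IPC port. The excerpts below, drawn from the Hadoop codebase and its tests, show the common ways a DatanodeID is constructed and consumed.

As a quick orientation, a minimal construction sketch (the seven-argument constructor matches the PBHelper.convert example at the end of this page; the literal values are illustrative only):

    DatanodeID id = new DatanodeID(
        "127.0.0.1",         // IP address used for data transfer
        "localhost",         // hostname
        "datanode-uuid-1",   // datanode UUID (hypothetical value)
        50010,               // xfer (data transfer) port
        50075,               // info (HTTP) port
        50475,               // secure info (HTTPS) port
        50020);              // IPC port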


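From a DFSClient-style constructor: a placeholder DatanodeInfo for the local host is built from a DatanodeID carrying only the local IP address, so localhost can be resolved through the topology mapping like any other node: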
    // minimum acceptable read throughput; -1 disables the slow-read check
    this.minReadSpeedBps = conf.getLong("dfs.min.read.speed.bps", -1);
    this.maxBlockAcquireFailures = getMaxBlockAcquireFailures(conf);
    this.localHost = InetAddress.getLocalHost();

    // fetch network location of localhost: wrap the local IP in a bare
    // DatanodeID so it can be resolved like any other datanode
    this.pseuDatanodeInfoForLocalhost = new DatanodeInfo(new DatanodeID(
        this.localHost.getHostAddress()));
    this.dnsToSwitchMapping = ReflectionUtils.newInstance(
        conf.getClass("topology.node.switch.mapping.impl", ScriptBasedMapping.class,
            DNSToSwitchMapping.class), conf);
    ArrayList<String> tempList = new ArrayList<String>();


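Expanding an array of favored-node socket addresses into DatanodeInfo objects, each wrapping a DatanodeID keyed by its "ip:port" string: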
      // Build a DatanodeInfo for each favored node, keyed by the
      // address and port of that node.
      DatanodeInfo[] favoredNodeInfos = null;
      if (favoredNodes != null) {
        favoredNodeInfos = new DatanodeInfo[favoredNodes.length];
        for (int i = 0; i < favoredNodes.length; i++) {
          favoredNodeInfos[i] = new DatanodeInfo(new DatanodeID(
              favoredNodes[i].getAddress().getHostAddress() + ":" +
              favoredNodes[i].getPort()));
        }
      }

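A NameNode servlet picking a random datanode and redirecting the HTTP client to it: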
      final ServletContext context = getServletContext();
      final Configuration conf = NameNodeHttpServer.getConfFromContext(context);
      final UserGroupInformation ugi = getUGI(request, conf);
      final NameNode namenode = NameNodeHttpServer.getNameNodeFromContext(
          context);
      final DatanodeID datanode = NamenodeJspHelper.getRandomDatanode(namenode);
      try {
        response.sendRedirect(
            createRedirectURL(ugi, datanode, request, namenode).toString());
      } catch (IOException e) {
        response.sendError(400, e.getMessage());
      }

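A benchmark datanode registering with the NameNode: the DatanodeID carries the default IP and hostname, a freshly generated UUID, a per-node xfer port, and the stock HTTP/HTTPS/IPC ports: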
    void register() throws IOException {
      // get versions from the namenode
      nsInfo = nameNodeProto.versionRequest();
      dnRegistration = new DatanodeRegistration(
          new DatanodeID(DNS.getDefaultIP("default"),
              DNS.getDefaultHost("default", "default"),
              DataNode.generateUuid(), getNodePort(dnIdx),
              DFSConfigKeys.DFS_DATANODE_HTTP_DEFAULT_PORT,
              DFSConfigKeys.DFS_DATANODE_HTTPS_DEFAULT_PORT,
              DFSConfigKeys.DFS_DATANODE_IPC_DEFAULT_PORT),
          // storage info, block keys and software version complete the
          // registration (as in NNThroughputBenchmark)
          new DataStorage(nsInfo),
          new ExportedBlockKeys(), VersionInfo.getVersion());

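Choosing the host for a block-read redirect: prefer the resolved hostname when the picked node is a full DatanodeInfo, otherwise fall back to the raw IP; the info port to use depends on the request scheme: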
    String scheme = request.getScheme();
    final LocatedBlocks blks = nnproxy.getBlockLocations(
        status.getFullPath(new Path(path)).toUri().getPath(), 0, 1);
    final Configuration conf = NameNodeHttpServer.getConfFromContext(
        getServletContext());
    final DatanodeID host = pickSrcDatanode(blks, status, conf);
    final String hostname;
    // a DatanodeInfo carries a resolved hostname; a bare DatanodeID may not
    if (host instanceof DatanodeInfo) {
      hostname = host.getHostName();
    } else {
      hostname = host.getIpAddr();
    }

    int port = "https".equals(scheme)
        ? host.getInfoSecurePort() : host.getInfoPort();

    String dtParam = "";
    if (dt != null) {
      dtParam = JspHelper.getDelegationTokenUrlParam(dt);
    }
    // ... the redirect URL is then assembled from scheme, hostname, port
    // and dtParam (elided in this excerpt)

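A test asserting the contents of the dead-node report: the decommissioned datanode's ID comes first, the bogus hosts-file entry last: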
      // Two dead nodes are expected: (1) the decommissioned node is
      // now considered dead because it is no longer allowed to connect,
      // and (2) the bogus entry in the hosts file (these entries are
      // always added last)
      info = client.datanodeReport(DatanodeReportType.DEAD);
      assertEquals("There should be 2 dead nodes", 2, info.length);
      DatanodeID id = cluster.getDataNodes().get(0).getDatanodeId();
      assertEquals(id.getHostName(), info[0].getHostName());
      assertEquals(bogusIp, info[1].getHostName());
    }
  }

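A decommission test that excludes the first live datanode by its transfer address; DatanodeInfo extends DatanodeID, so a report entry can be assigned to a DatanodeID directly: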
    FileSystem fileSys = cluster.getFileSystem();
    writeFile(fileSys, file1, replicas);
       
    DFSClient client = getDfsClient(cluster.getNameNode(), conf);
    DatanodeInfo[] info = client.datanodeReport(DatanodeReportType.LIVE);
    DatanodeID excludedDatanodeID = info[0];
    String excludedDatanodeName = info[0].getXferAddr();

    writeConfigFile(excludeFile,
        new ArrayList<String>(Arrays.asList(excludedDatanodeName)));

    // Add a new datanode to the cluster

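The heartbeat monitor scanning registered datanodes for the first dead node while counting stale ones: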
    // nothing to do while the namesystem is still in startup safe mode
    // (guard reconstructed from the surrounding HeartbeatManager source)
    if (namesystem.isInStartupSafeMode()) {
      return;
    }
    boolean allAlive = false;
    while (!allAlive) {
      // locate the first dead node.
      DatanodeID dead = null;
      // check the number of stale nodes
      int numOfStaleNodes = 0;
      synchronized(this) {
        for (DatanodeDescriptor d : datanodes) {
          if (dead == null && dm.isDatanodeDead(d)) {
            dead = d;
          }
          // stale-node counting increments numOfStaleNodes here (elided)
        }
      }
      allAlive = dead == null;
      // ... a dead node, if found, is removed and the loop repeats
    }

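JspHelper constructing a block reader: the target DatanodeInfo is synthesized on the fly from a socket address (the lines shown are the tail of a BlockReaderFactory builder chain):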
      setLength(amtToRead).
      setVerifyChecksum(true).
      setClientName("JspHelper").
      setClientCacheContext(ClientContext.getFromConf(conf)).
      // a throwaway DatanodeInfo: the block-pool ID stands in for the
      // datanode UUID, and the info/secure-info/IPC ports are zeroed
      setDatanodeInfo(new DatanodeInfo(
          new DatanodeID(addr.getAddress().getHostAddress(),
              addr.getHostName(), poolId, addr.getPort(), 0, 0, 0))).
      setCachingStrategy(CachingStrategy.newDefaultStrategy()).
      setConfiguration(conf).
      setRemotePeerFactory(new RemotePeerFactory() {
        // newConnectedPeer(...) dials the target datanode; elided here
      }).
      build();

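PBHelper translating the protobuf DatanodeIDProto into a DatanodeID, defaulting the secure info port to 0 when the field is absent: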

  // DatanodeID
  public static DatanodeID convert(DatanodeIDProto dn) {
    return new DatanodeID(dn.getIpAddr(), dn.getHostName(),
        dn.getDatanodeUuid(), dn.getXferPort(), dn.getInfoPort(),
        dn.hasInfoSecurePort() ? dn.getInfoSecurePort() : 0,
        dn.getIpcPort());
  }
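The inverse mapping, from a DatanodeID back to the protobuf message, follows the same field order. A sketch along the lines of the PBHelper code, assuming the builder setters generated from hdfs.proto (setIpAddr, setHostName, and so on):

    public static DatanodeIDProto convert(DatanodeID dn) {
      // datanodeUuid is a required proto field, so substitute an empty
      // string when the ID has no UUID assigned yet
      return DatanodeIDProto.newBuilder()
          .setIpAddr(dn.getIpAddr())
          .setHostName(dn.getHostName())
          .setDatanodeUuid(dn.getDatanodeUuid() != null ? dn.getDatanodeUuid() : "")
          .setXferPort(dn.getXferPort())
          .setInfoPort(dn.getInfoPort())
          .setInfoSecurePort(dn.getInfoSecurePort())
          .setIpcPort(dn.getIpcPort())
          .build();
    }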
