Package org.apache.hadoop.hdfs.server.datanode

Examples of org.apache.hadoop.hdfs.server.datanode.DataNode


  /** Test metrics indicating the number of stale DataNodes */
  @Test
  public void testStaleNodes() throws Exception {
    // Set two datanodes as stale
    for (int i = 0; i < 2; i++) {
      DataNode dn = cluster.getDataNodes().get(i);
      DataNodeTestUtils.setHeartbeatsDisabledForTests(dn, true);
      long staleInterval = CONF.getLong(
          DFSConfigKeys.DFS_NAMENODE_STALE_DATANODE_INTERVAL_KEY,
          DFSConfigKeys.DFS_NAMENODE_STALE_DATANODE_INTERVAL_DEFAULT);
      cluster.getNameNode().getNamesystem().getBlockManager()
          .getDatanodeManager().getDatanode(dn.getDatanodeId())
          .setLastUpdate(Time.now() - staleInterval - 1);
    }
    // Let the HeartbeatManager check the heartbeats
    BlockManagerTestUtil.checkHeartbeat(cluster.getNameNode().getNamesystem()
        .getBlockManager());
    assertGauge("StaleDataNodes", 2, getMetrics(NS_METRICS));
   
    // Reset stale datanodes
    for (int i = 0; i < 2; i++) {
      DataNode dn = cluster.getDataNodes().get(i);
      DataNodeTestUtils.setHeartbeatsDisabledForTests(dn, false);
      cluster.getNameNode().getNamesystem().getBlockManager()
          .getDatanodeManager().getDatanode(dn.getDatanodeId())
          .setLastUpdate(Time.now());
    }
   
    // Let the HeartbeatManager check the heartbeats again
    BlockManagerTestUtil.checkHeartbeat(cluster.getNameNode().getNamesystem()
        .getBlockManager());
    assertGauge("StaleDataNodes", 0, getMetrics(NS_METRICS));
View Full Code Here
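
The stale interval used above comes from configuration. A minimal sketch of how a test might lower it before building the cluster (the three-second value is illustrative, not taken from this test):

    Configuration conf = new HdfsConfiguration();
    // Treat a DataNode as stale after 3 seconds without a heartbeat
    // (illustrative value; the default is much longer).
    conf.setLong(DFSConfigKeys.DFS_NAMENODE_STALE_DATANODE_INTERVAL_KEY, 3 * 1000);
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
        .numDataNodes(3).build();
    cluster.waitActive();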


      final String path = ServletUtil.getDecodedPath(request, "/getFileChecksum");
      final XMLOutputter xml = new XMLOutputter(out, "UTF-8");
      xml.declaration();

      final ServletContext context = getServletContext();
      final DataNode datanode = (DataNode) context.getAttribute("datanode");
      final Configuration conf =
        new HdfsConfiguration(datanode.getConf());
     
      try {
        final DFSClient dfs = DatanodeJspHelper.getDFSClient(request,
            datanode, conf, getUGI(request, conf));
        final MD5MD5CRC32FileChecksum checksum = dfs.getFileChecksum(path, Long.MAX_VALUE);
View Full Code Here
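
For comparison, the same checksum is reachable through the public client API instead of the servlet; a minimal sketch (the path is illustrative):

    FileSystem fs = FileSystem.get(conf);
    FileChecksum checksum = fs.getFileChecksum(new Path("/user/test/file"));
    System.out.println(checksum.getAlgorithmName() + ": " + checksum);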

    MiniDFSCluster cluster = null;
    try {
      cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
      cluster.waitActive();
      final DFSAdmin dfsadmin = new DFSAdmin(conf);
      DataNode dn = cluster.getDataNodes().get(0);

      // check the datanode
      final String dnAddr = dn.getDatanodeId().getIpcAddr(false);
      final String[] args1 = {"-getDatanodeInfo", dnAddr};
      Assert.assertEquals(0, dfsadmin.run(args1));

      // issue shutdown to the datanode.
      final String[] args2 = {"-shutdownDatanode", dnAddr, "upgrade" };
      Assert.assertEquals(0, dfsadmin.run(args2));

      // the datanode should be down.
      Thread.sleep(2000);
      Assert.assertFalse("DataNode should exit", dn.isDatanodeUp());

      // ping should fail.
      Assert.assertEquals(-1, dfsadmin.run(args1));
    } finally {
      if (cluster != null) cluster.shutdown();
    }
View Full Code Here
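
The fixed Thread.sleep(2000) can be flaky on slow machines. A hedged alternative using Hadoop's test utilities (assumes org.apache.hadoop.test.GenericTestUtils and Guava's Supplier are on the classpath, and that dn is effectively final):

    // Poll every 100 ms, for up to 10 s, until the DataNode reports down.
    GenericTestUtils.waitFor(new Supplier<Boolean>() {
      @Override
      public Boolean get() {
        return !dn.isDatanodeUp();
      }
    }, 100, 10000);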

      LocatedBlocks blocks = client.getNamenode().getBlockLocations(
          fileName.toString(), 0, blockSize);
      DatanodeInfo[] nodes = blocks.get(0).getLocations();
      assertEquals(nodes.length, 3);
      DataNode staleNode = null;
      DatanodeDescriptor staleNodeInfo = null;
      // stop the heartbeat of the first node
      staleNode = this.stopDataNodeHeartbeat(cluster, nodes[0].getHostName());
      assertNotNull(staleNode);
      // set the first node as stale
      staleNodeInfo = cluster.getNameNode().getNamesystem().getBlockManager()
          .getDatanodeManager()
          .getDatanode(staleNode.getDatanodeId());
      staleNodeInfo.setLastUpdate(Time.now() - staleInterval - 1);

      LocatedBlocks blocksAfterStale = client.getNamenode().getBlockLocations(
          fileName.toString(), 0, blockSize);
      DatanodeInfo[] nodesAfterStale = blocksAfterStale.get(0).getLocations();
      assertEquals(nodesAfterStale.length, 3);
      assertEquals(nodesAfterStale[2].getHostName(), nodes[0].getHostName());

      // restart the staleNode's heartbeat
      DataNodeTestUtils.setHeartbeatsDisabledForTests(staleNode, false);
      // reset the first node as non-stale, so as to avoid two stale nodes
      staleNodeInfo.setLastUpdate(Time.now());

      LocatedBlock lastBlock = client.getLocatedBlocks(fileName.toString(), 0,
          Long.MAX_VALUE).getLastLocatedBlock();
      nodes = lastBlock.getLocations();
      assertEquals(nodes.length, 3);
      // stop the heartbeat of the first node for the last block
      staleNode = this.stopDataNodeHeartbeat(cluster, nodes[0].getHostName());
      assertNotNull(staleNode);
      // set the node as stale
      cluster.getNameNode().getNamesystem().getBlockManager()
          .getDatanodeManager()
          .getDatanode(staleNode.getDatanodeId())
          .setLastUpdate(Time.now() - staleInterval - 1);

      LocatedBlock lastBlockAfterStale = client.getLocatedBlocks(
          fileName.toString(), 0, Long.MAX_VALUE).getLastLocatedBlock();
      nodesAfterStale = lastBlockAfterStale.getLocations();
      assertEquals(nodesAfterStale.length, 3);
      assertEquals(nodesAfterStale[2].getHostName(), nodes[0].getHostName());
View Full Code Here
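
Note that the NameNode only pushes stale replicas to the end of the returned locations when read avoidance is enabled; a one-line sketch of the relevant key, set before the cluster is built:

    conf.setBoolean(
        DFSConfigKeys.DFS_NAMENODE_AVOID_STALE_DATANODE_FOR_READ_KEY, true);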

    // Step 1.b, start a DN with a slow heartbeat, so that we know for sure it
    // will be chosen as the target of the excess replica during recommission.
    hdfsConf.setLong(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 30);
    cluster.startDataNodes(hdfsConf, 1, true, null, null, null);
    DataNode lastDN = cluster.getDataNodes().get(3);
    lastDN.getDatanodeUuid();

    // Step 2, decommission the first DN at both ANN and SBN.
    DataNode firstDN = cluster.getDataNodes().get(0);

    // Step 2.a, ask ANN to decomm the first DN
    DatanodeInfo decommissionedNodeFromANN = decommissionNode(
        0, firstDN.getDatanodeUuid(), null, AdminStates.DECOMMISSIONED);

    // Step 2.b, ask SBN to decomm the first DN
    DatanodeInfo decomNodeFromSBN = decommissionNode(1, firstDN.getDatanodeUuid(), null,
        AdminStates.DECOMMISSIONED);

    // Step 3, recommission the first DN on SBN and ANN to create an excess
    // replica. It recommissions the node on SBN first to create a potentially
    // inconsistent state. In a production cluster, such inconsistent state can
    // happen even if the recommission command was issued on ANN first, given
    // the async nature of the system.

    // Step 3.a, ask SBN to recomm the first DN.
    // SBN has been fixed so that it no longer invalidates excess replica during
    // recommission.
    // Before the fix, SBN could get into the following state.
    //    1. the last DN would have been chosen as excess replica, given its
    //    heartbeat is considered old.
    //    Please refer to BlockPlacementPolicyDefault#chooseReplicaToDelete
    //    2. After recomissionNode finishes, SBN has 3 live replicas ( 0, 1, 2 )
    //    and one excess replica ( 3 )
    // After the fix,
    //    After recomissionNode finishes, SBN has 4 live replicas ( 0, 1, 2, 3 )
    Thread.sleep(slowHeartbeatDNwaitTime);
    recomissionNode(1, decomNodeFromSBN);

    // Step 3.b, ask ANN to recommission the first DN.
    // To verify the fix, the test makes sure the excess replica picked by ANN
    // is different from the one picked by SBN before the fix.
    // To achieve that, we make sure the next-to-last DN is chosen as the
    // excess replica by ANN.
    // 1. restore LastDNprop's heartbeat interval.
    // 2. Make next-to-last DN's heartbeat slow.
    MiniDFSCluster.DataNodeProperties LastDNprop = cluster.stopDataNode(3);
    LastDNprop.conf.setLong(
        DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, HEARTBEAT_INTERVAL);
    cluster.restartDataNode(LastDNprop);

    MiniDFSCluster.DataNodeProperties nextToLastDNprop = cluster.stopDataNode(2);
    nextToLastDNprop.conf.setLong(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 30);
    cluster.restartDataNode(nextToLastDNprop);
    cluster.waitActive();
    Thread.sleep(slowHeartbeatDNwaitTime);
    recomissionNode(0, decommissionedNodeFromANN);

    // Step 3.c, make sure the DN has deleted the block and reported to the NNs
    cluster.triggerHeartbeats();
    HATestUtil.waitForDNDeletions(cluster);
    cluster.triggerDeletionReports();

    // Step 4, decommission the first DN on both ANN and SBN.
    // With the fix ensuring SBN no longer marks an excess replica during
    // recommission, SBN's decommission can finish properly.
    decommissionNode(0, firstDN.getDatanodeUuid(), null,
        AdminStates.DECOMMISSIONED);

    // Ask SBN to decomm the first DN
    decommissionNode(1, firstDN.getDatanodeUuid(), null,
        AdminStates.DECOMMISSIONED);

    cluster.shutdown();

  }
View Full Code Here
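
The decommissionNode/recomissionNode helpers are not shown in this excerpt. Decommissioning in MiniDFSCluster tests is typically driven by a hosts-exclude file plus a refreshNodes call; a rough sketch under that assumption (excludeFile and nnIndex are illustrative, and writing the DataNode's name into the file is elided):

    // Point the NN at an exclude file listing the DataNode, then re-read it.
    conf.set(DFSConfigKeys.DFS_HOSTS_EXCLUDE, excludeFile.getAbsolutePath());
    cluster.getNamesystem(nnIndex).getBlockManager().getDatanodeManager()
        .refreshNodes(conf);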

    File dataDir = new File(BASE_DIR, "data").getCanonicalFile();
    Configuration conf = cluster.getConfiguration(0);
    // 1. Test an unsupported scheme. Only "file:" is supported.
    String dnDir = makeURI("shv", null, fileAsURI(dataDir).getPath());
    conf.set(DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY, dnDir);
    DataNode dn = null;
    try {
      dn = DataNode.createDataNode(new String[]{}, conf);
      fail();
    } catch(Exception e) {
      // expecting exception here
    } finally {
      if (dn != null) {
        dn.shutdown();
      }
    }
    assertNull("Data-node startup should have failed.", dn);

    // 2. Test "file:" schema and no schema (path-only). Both should work.
View Full Code Here
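
Concretely, the data-dir values exercised by the test's makeURI helper look like this (paths illustrative):

    // rejected:  shv:///build/test/data    (unsupported scheme)
    // accepted:  file:///build/test/data   ("file:" scheme)
    // accepted:  /build/test/data          (no scheme, plain path)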

    conf.set(DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY,
      makeURI("file", null, fileAsURI(dataDir).getPath()));
    long prevLimit = conf.getLong(
        DFSConfigKeys.DFS_DATANODE_MAX_LOCKED_MEMORY_KEY,
        DFSConfigKeys.DFS_DATANODE_MAX_LOCKED_MEMORY_DEFAULT);
    DataNode dn = null;
    try {
      // Try starting the DN with limit configured to the ulimit
      conf.setLong(DFSConfigKeys.DFS_DATANODE_MAX_LOCKED_MEMORY_KEY,
          memlockLimit);
      dn = DataNode.createDataNode(new String[]{}, conf);
      dn.shutdown();
      dn = null;
      // Try starting the DN with a limit > ulimit
      conf.setLong(DFSConfigKeys.DFS_DATANODE_MAX_LOCKED_MEMORY_KEY,
          memlockLimit+1);
      try {
        dn = DataNode.createDataNode(new String[]{}, conf);
      } catch (RuntimeException e) {
        GenericTestUtils.assertExceptionContains(
            "more than the datanode's available RLIMIT_MEMLOCK", e);
      }
    } finally {
      if (dn != null) {
        dn.shutdown();
      }
      conf.setLong(DFSConfigKeys.DFS_DATANODE_MAX_LOCKED_MEMORY_KEY,
          prevLimit);
    }
  }
View Full Code Here
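
The memlockLimit compared against above is presumably the process's RLIMIT_MEMLOCK; a sketch of how Hadoop test code usually obtains it (requires the native hadoop library to be loaded):

    long memlockLimit =
        NativeIO.POSIX.getCacheManipulator().getMemlockLimit();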

    createNameNode(nnIndex, conf, numDataNodes, true, null, null,
        nameserviceId, nnId);

    // Refresh datanodes with the newly started namenode
    for (DataNodeProperties dn : dataNodes) {
      DataNode datanode = dn.datanode;
      datanode.refreshNamenodes(conf);
    }

    // Wait for new namenode to get registrations from all the datanodes
    waitActive(nnIndex);
    return nameNodes[nnIndex].nameNode;
View Full Code Here
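
refreshNamenodes makes a running DataNode re-read the federation configuration; a sketch of the keys it picks up (the nameservice IDs and address are illustrative):

    conf.set(DFSConfigKeys.DFS_NAMESERVICES, "ns1,ns2");
    conf.set(DFSUtil.addKeySuffixes(
        DFSConfigKeys.DFS_NAMENODE_RPC_ADDRESS_KEY, "ns2"), "localhost:9902");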

    final ExtendedBlock blk = lb.getBlock();
    assertEquals(len1, lb.getBlockSize());

    DatanodeInfo[] datanodeinfos = lb.getLocations();
    assertEquals(repl, datanodeinfos.length);
    final DataNode dn = cluster.getDataNode(datanodeinfos[0].getIpcPort());
    final File f = DataNodeTestUtils.getBlockFile(
        dn, blk.getBlockPoolId(), blk.getLocalBlock());
    final RandomAccessFile raf = new RandomAccessFile(f, "rw");
    AppendTestUtil.LOG.info("dn=" + dn + ", blk=" + blk + " (length=" + blk.getNumBytes() + ")");
    assertEquals(len1, raf.length());
View Full Code Here
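
Opening the block file read-write like this is usually the setup for corrupting or truncating the replica on disk; a one-line sketch of the typical next step (an assumption, not shown in this excerpt):

    // Shrink the on-disk replica so its length disagrees with the NameNode's.
    raf.setLength(len1 - 1);
    raf.close();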

      final long size = lb.getBlockSize();
      if (i < numblock - 1) {
        assertEquals(BLOCK_SIZE, size);
      }
      for(DatanodeInfo datanodeinfo : lb.getLocations()) {
        final DataNode dn = cluster.getDataNode(datanodeinfo.getIpcPort());
        final Block metainfo = DataNodeTestUtils.getFSDataset(dn).getStoredBlock(
            blk.getBlockPoolId(), blk.getBlockId());
        assertEquals(size, metainfo.getNumBytes());
      }
    }
View Full Code Here
