Examples of DataNode


Examples of org.apache.hadoop.hdfs.server.datanode.DataNode

  /**
   * Wait for the given datanode to heartbeat once.
   */
  public void waitForDNHeartbeat(int dnIndex, long timeoutMillis)
    throws IOException, InterruptedException {
    DataNode dn = getDataNodes().get(dnIndex);
    InetSocketAddress addr = new InetSocketAddress("localhost",
                                                   getNameNodePort());
    DFSClient client = new DFSClient(addr, conf);

    long startTime = System.currentTimeMillis();
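The excerpt ends just where the polling loop would begin. When a test only needs a heartbeat to happen, rather than to observe one, it can force one directly; a minimal sketch using DataNodeTestUtils.triggerHeartbeat, the same helper that appears in the decommission example further down this page ("cluster" here is an assumed MiniDFSCluster reference for callers outside the class):

  // Force an immediate heartbeat instead of polling for one.
  DataNode dn = cluster.getDataNodes().get(dnIndex);
  DataNodeTestUtils.triggerHeartbeat(dn);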

Examples of org.apache.hadoop.hdfs.server.datanode.DataNode

    }, 500, 60000);

    // Send a cache report referring to a bogus block.  It is important that
    // the NameNode be robust against this.
    NamenodeProtocols nnRpc = namenode.getRpcServer();
    DataNode dn0 = cluster.getDataNodes().get(0);
    String bpid = cluster.getNamesystem().getBlockPoolId();
    LinkedList<Long> bogusBlockIds = new LinkedList<Long> ();
    bogusBlockIds.add(999999L);
    nnRpc.cacheReport(dn0.getDNRegistrationForBP(bpid), bpid, bogusBlockIds);

    Path rootDir = helper.getDefaultWorkingDirectory(dfs);
    // Create the pool
    final String pool = "friendlyPool";
    nnRpc.addCachePool(new CachePoolInfo(pool));
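The snippet stops right after the pool is created. A hypothetical follow-up (an assumption, not part of the truncated source; the two-argument addCacheDirective is the Hadoop 2.5+ form) would register a directive so a path actually gets cached into the pool:

    // Ask the NameNode to cache a (hypothetical) path in the new pool.
    long directiveId = nnRpc.addCacheDirective(
        new CacheDirectiveInfo.Builder()
            .setPath(new Path("/cached"))
            .setPool(pool)
            .build(),
        EnumSet.noneOf(CacheFlag.class));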

Examples of org.apache.hadoop.hdfs.server.datanode.DataNode

          namesystem.getInServiceXceiverAverage(), EPSILON);
     
      // shutdown half the nodes and force a heartbeat check to ensure
      // counts are accurate
      for (int i=0; i < nodes/2; i++) {
        DataNode dn = datanodes.get(i);
        DatanodeDescriptor dnd = dnm.getDatanode(dn.getDatanodeId());
        dn.shutdown();
        dnd.setLastUpdate(0L);
        BlockManagerTestUtil.checkHeartbeat(namesystem.getBlockManager());
        expectedInServiceNodes--;
        assertEquals(expectedInServiceNodes, namesystem.getNumLiveDataNodes());
        assertEquals(expectedInServiceNodes, namesystem.getNumDatanodesInService());
      }

      // restart the nodes to verify that counts are correct after
      // node re-registration
      cluster.restartDataNodes();
      cluster.waitActive();
      datanodes = cluster.getDataNodes();
      expectedInServiceNodes = nodes;
      assertEquals(nodes, datanodes.size());
      assertEquals(nodes, namesystem.getNumLiveDataNodes());
      assertEquals(expectedInServiceNodes, namesystem.getNumDatanodesInService());
      assertEquals(expectedTotalLoad, namesystem.getTotalLoad());
      assertEquals((double)expectedInServiceLoad/expectedInServiceNodes,
          namesystem.getInServiceXceiverAverage(), EPSILON);
     
      // create streams and hsync to force datastreamers to start
      DFSOutputStream[] streams = new DFSOutputStream[fileCount];
      for (int i=0; i < fileCount; i++) {
        streams[i] = (DFSOutputStream)fs.create(new Path("/f"+i), fileRepl)
            .getWrappedStream();
        streams[i].write("1".getBytes());
        streams[i].hsync();
        // the load for writers is 2 because both the write xceiver & packet
        // responder threads are counted in the load
        expectedTotalLoad += 2*fileRepl;
        expectedInServiceLoad += 2*fileRepl;
      }
      // force nodes to send load update
      triggerHeartbeats(datanodes);
      assertEquals(nodes, namesystem.getNumLiveDataNodes());
      assertEquals(expectedInServiceNodes,
          namesystem.getNumDatanodesInService());
      assertEquals(expectedTotalLoad, namesystem.getTotalLoad());
      assertEquals((double)expectedInServiceLoad/expectedInServiceNodes,
          namesystem.getInServiceXceiverAverage(), EPSILON);

      // decomm a few nodes, subtract their load from the expected load,
      // trigger heartbeat to force load update
      for (int i=0; i < fileRepl; i++) {
        expectedInServiceNodes--;
        DatanodeDescriptor dnd =
            dnm.getDatanode(datanodes.get(i).getDatanodeId());
        expectedInServiceLoad -= dnd.getXceiverCount();
        dnm.startDecommission(dnd);
        DataNodeTestUtils.triggerHeartbeat(datanodes.get(i));
        Thread.sleep(100);
        assertEquals(nodes, namesystem.getNumLiveDataNodes());
        assertEquals(expectedInServiceNodes,
            namesystem.getNumDatanodesInService());
        assertEquals(expectedTotalLoad, namesystem.getTotalLoad());
        assertEquals((double)expectedInServiceLoad/expectedInServiceNodes,
            namesystem.getInServiceXceiverAverage(), EPSILON);
      }
     
      // check expected load while closing each stream.  recalc expected
      // load based on whether the nodes in the pipeline are decomm
      for (int i=0; i < fileCount; i++) {
        int decomm = 0;
        for (DatanodeInfo dni : streams[i].getPipeline()) {
          DatanodeDescriptor dnd = dnm.getDatanode(dni);
          expectedTotalLoad -= 2;
          if (dnd.isDecommissionInProgress() || dnd.isDecommissioned()) {
            decomm++;
          } else {
            expectedInServiceLoad -= 2;
          }
        }
        try {
          streams[i].close();
        } catch (IOException ioe) {
          // nodes will go decommissioned even if there's a UC block whose
          // other locations are decommissioned too.  we'll ignore that
          // bug for now
          if (decomm < fileRepl) {
            throw ioe;
          }
        }
        triggerHeartbeats(datanodes);
        // verify node count and loads
        assertEquals(nodes, namesystem.getNumLiveDataNodes());
        assertEquals(expectedInServiceNodes,
            namesystem.getNumDatanodesInService());
        assertEquals(expectedTotalLoad, namesystem.getTotalLoad());
        assertEquals((double)expectedInServiceLoad/expectedInServiceNodes,
            namesystem.getInServiceXceiverAverage(), EPSILON);
      }

      // shutdown each node, verify node counts based on decomm state
      for (int i=0; i < nodes; i++) {
        DataNode dn = datanodes.get(i);
        dn.shutdown();
        // force it to appear dead so live count decreases
        DatanodeDescriptor dnDesc = dnm.getDatanode(dn.getDatanodeId());
        dnDesc.setLastUpdate(0L);
        BlockManagerTestUtil.checkHeartbeat(namesystem.getBlockManager());
        assertEquals(nodes-1-i, namesystem.getNumLiveDataNodes());
        // first few nodes are already out of service
        if (i >= fileRepl) {
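The load bookkeeping above follows from how the DataNode counts xceiver threads: each write pipeline holds one write xceiver plus one packet responder on every replica, which is why the test adds 2*fileRepl per stream. A worked example with assumed values (the excerpt never shows fileCount or fileRepl being initialized):

      int fileRepl = 3;                          // assumed replication factor
      int fileCount = 2;                         // assumed number of open streams
      int loadPerStream = 2 * fileRepl;          // 6: two threads per replica
      int addedLoad = fileCount * loadPerStream; // 12 added to getTotalLoad()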

Examples of org.apache.hadoop.hdfs.server.datanode.DataNode

    DatanodeInfo[] datanodeinfos = lblock.getLocations();
    ExtendedBlock block = lblock.getBlock();
    // corrupt some /all of the block replicas
    for (int i = 0; i < corruptBlockCount; i++) {
      DatanodeInfo dninfo = datanodeinfos[i];
      final DataNode dn = cluster.getDataNode(dninfo.getIpcPort());
      corruptBlock(block, dn);
      LOG.debug("Corrupted block " + block.getBlockName() + " on data node "
          + dninfo);

    }
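corruptBlock is a test helper whose body is not shown. One plausible implementation (a sketch, assuming direct access to the replica file through DataNodeTestUtils.getFile, the accessor used in the last example on this page) flips a byte of the on-disk block file so checksum verification fails on the next read:

  private static void corruptBlock(ExtendedBlock block, DataNode dn)
      throws IOException {
    // Locate the replica file for this block on the given datanode.
    File blockFile = DataNodeTestUtils.getFile(dn, block.getBlockPoolId(),
        block.getBlockId());
    RandomAccessFile raf = new RandomAccessFile(blockFile, "rw");
    try {
      int first = raf.read();     // read the first byte...
      raf.seek(0);
      raf.write(first ^ 0xff);    // ...and write back its complement
    } finally {
      raf.close();
    }
  }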

Examples of org.apache.hadoop.hdfs.server.datanode.DataNode

  }

  @VisibleForTesting
  Token<DelegationTokenIdentifier> deserializeToken(String delegation,
      String nnId) throws IOException {
    final DataNode datanode = (DataNode) context.getAttribute("datanode");
    final Configuration conf = datanode.getConf();
    final Token<DelegationTokenIdentifier> token =
        new Token<DelegationTokenIdentifier>();
    token.decodeFromUrlString(delegation);
    URI nnUri = URI.create(HdfsConstants.HDFS_URI_SCHEME + "://" + nnId);
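The method is cut off after nnUri is built. The remainder presumably binds the decoded token to that URI so clients route it to the right NameNode; a minimal sketch of such an ending (an assumption; the full method also has to handle HA logical URIs, which is omitted here):

    token.setService(SecurityUtil.buildTokenService(nnUri));
    return token;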

Examples of org.apache.hadoop.hdfs.server.datanode.DataNode

      final OverwriteParam overwrite,
      final BufferSizeParam bufferSize,
      final ReplicationParam replication,
      final BlockSizeParam blockSize
      ) throws IOException, URISyntaxException {
    final DataNode datanode = (DataNode)context.getAttribute("datanode");

    switch(op.getValue()) {
    case CREATE:
    {
      final Configuration conf = new Configuration(datanode.getConf());
      conf.set(FsPermission.UMASK_LABEL, "000");

      final int b = bufferSize.getValue(conf);
      DFSClient dfsclient = newDfsClient(nnId, conf);
      FSDataOutputStream out = null;
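The CREATE branch breaks off after the DFSClient is constructed. A plausible continuation (an assumption; the real handler passes more options, such as the request's permission parameter):

      out = new FSDataOutputStream(dfsclient.create(fullpath,
          overwrite.getValue(),               // replace an existing file?
          replication.getValue(conf),         // replication factor
          blockSize.getValue(conf)), null);   // block size for the new file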

Examples of org.apache.hadoop.hdfs.server.datanode.DataNode

      final String nnId,
      final String fullpath,
      final PostOpParam op,
      final BufferSizeParam bufferSize
      ) throws IOException {
    final DataNode datanode = (DataNode)context.getAttribute("datanode");

    switch(op.getValue()) {
    case APPEND:
    {
      final Configuration conf = new Configuration(datanode.getConf());
      final int b = bufferSize.getValue(conf);
      DFSClient dfsclient = newDfsClient(nnId, conf);
      FSDataOutputStream out = null;
      try {
        out = dfsclient.append(fullpath, b, null, null);
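The APPEND branch is truncated inside the try block. A plausible remainder (an assumption; "in" stands for the request's InputStream and LOG for the class logger, neither of which appears in the excerpt):

        IOUtils.copyBytes(in, out, b);   // stream the request body into the file
      } finally {
        IOUtils.cleanup(LOG, out);       // close quietly even on failure
        IOUtils.cleanup(LOG, dfsclient);
      }
      return Response.ok().type(MediaType.APPLICATION_OCTET_STREAM).build();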

Examples of org.apache.hadoop.hdfs.server.datanode.DataNode

      final GetOpParam op,
      final OffsetParam offset,
      final LengthParam length,
      final BufferSizeParam bufferSize
      ) throws IOException {
    final DataNode datanode = (DataNode)context.getAttribute("datanode");
    final Configuration conf = new Configuration(datanode.getConf());

    switch(op.getValue()) {
    case OPEN:
    {
      final int b = bufferSize.getValue(conf);
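The OPEN branch stops after reading the buffer size. A plausible continuation (an assumption; fullpath and newDfsClient are taken from the sibling handlers above): open the file through a DFSClient and seek to the requested offset before streaming it back:

      final DFSClient dfsclient = newDfsClient(nnId, conf);
      FSDataInputStream in = new FSDataInputStream(
          dfsclient.open(fullpath, b, true));  // true = verify checksums
      in.seek(offset.getValue());              // honor the offset parameter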

Examples of org.apache.hadoop.hdfs.server.datanode.DataNode

          "but found to take " + diskSpace + " bytes", fileSize, diskSpace);
     
      // Check storage usage
      // can't check capacities for real storage since the OS file system may be changing under us.
      if (simulatedStorage) {
        DataNode dn = cluster.getDataNodes().get(0);
        FsDatasetSpi<?> dataset = DataNodeTestUtils.getFSDataset(dn);
        assertEquals(fileSize, dataset.getDfsUsed());
        assertEquals(SimulatedFSDataset.DEFAULT_CAPACITY-fileSize,
            dataset.getRemaining());
      }
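The simulatedStorage flag refers to SimulatedFSDataset, the in-memory dataset that makes capacity numbers deterministic. A minimal sketch of how a test enables it (assumed setup; the factory must be installed before the MiniDFSCluster starts):

      Configuration conf = new HdfsConfiguration();
      if (simulatedStorage) {
        SimulatedFSDataset.setFactory(conf);  // swap in the in-memory dataset
      }
      MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();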

Examples of org.apache.hadoop.hdfs.server.datanode.DataNode

          f, 0, Long.MAX_VALUE);
      assertEquals(1, locations.locatedBlockCount());
      LocatedBlock locatedblock = locations.getLocatedBlocks().get(0);
      int successcount = 0;
      for(DatanodeInfo datanodeinfo: locatedblock.getLocations()) {
        DataNode datanode = cluster.getDataNode(datanodeinfo.getIpcPort());
        ExtendedBlock blk = locatedblock.getBlock();
        Block b = DataNodeTestUtils.getFSDataset(datanode).getStoredBlock(
            blk.getBlockPoolId(), blk.getBlockId());
        final File blockfile = DataNodeTestUtils.getFile(datanode,
            blk.getBlockPoolId(), b.getBlockId());
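This last excerpt breaks off after resolving the replica's on-disk file. A plausible continuation (an assumption): count a replica as intact when its block file still exists:

        if (blockfile != null && blockfile.exists()) {
          successcount++;  // this replica survived on disk
        }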