Package org.apache.hadoop.hdfs.server.datanode

Examples of org.apache.hadoop.hdfs.server.datanode.DataNode
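
The snippets below come from MiniDFSCluster and from HDFS tests that manipulate DataNode instances directly. As a minimal sketch of the harness they all assume (the constructor form is an assumption and differs between Hadoop versions, e.g. newer releases use MiniDFSCluster.Builder), a test typically starts a small cluster and then reaches into its DataNodes:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.server.datanode.DataNode;

public class DataNodeExampleSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // Classic constructor: (conf, numDataNodes, format, racks); newer versions use a Builder.
    MiniDFSCluster cluster = new MiniDFSCluster(conf, 3, true, null);
    try {
      cluster.waitActive();                        // block until all DataNodes have registered
      for (DataNode dn : cluster.getDataNodes()) { // the instances the snippets below operate on
        System.out.println("DataNode up: " + dn.getDatanodeInfo());
      }
    } finally {
      cluster.shutdown();
    }
  }
}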


  public void shutdownDataNode(int index, boolean remove) {
    System.out.println("Shutting down DataNode " + index);
    DataNode dn = remove ? dataNodes.remove(index).datanode : dataNodes
        .get(index).datanode;
    dn.shutdown();
    numDataNodes--;
  }
View Full Code Here


  public DataNodeProperties stopDataNode(int i) {
    if (i < 0 || i >= dataNodes.size()) {
      return null;
    }
    DataNodeProperties dnprop = dataNodes.remove(i);
    DataNode dn = dnprop.datanode;
    System.out.println("MiniDFSCluster Stopping DataNode " +
                       dn.getDatanodeInfo() +
                       " from a total of " + (dataNodes.size() + 1) +
                       " datanodes.");
    dn.shutdown();
    numDataNodes--;
    return dnprop;
  }
View Full Code Here
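
stopDataNode returns a DataNodeProperties handle so the same node can later be brought back with restartDataNode. A hedged sketch of how a test might use the pair (the boolean return of restartDataNode and the waitActive call match most MiniDFSCluster versions, but treat the exact signatures as assumptions):

  // Stop the first DataNode but keep its properties for a later restart.
  DataNodeProperties dnprop = cluster.stopDataNode(0);
  assertTrue(dnprop != null);

  // ... exercise the code path that must cope with a missing DataNode ...

  // Bring the same DataNode back and wait until it has re-registered.
  assertTrue(cluster.restartDataNode(dnprop));
  cluster.waitActive();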

  public synchronized int findDataNodeIndex(String name) {
    int i;
    int namespaceId = getNameNode(0).getNamespaceID();
    try {
      for (i = 0; i < dataNodes.size(); i++) {
        DataNode dn = dataNodes.get(i).datanode;
        if (dn.getDNRegistrationForNS(namespaceId).getName().equals(name)) {
          break;
        }
      }
    } catch (IOException e){
      LOG.error(e);
View Full Code Here

  /**
   * Wait for the given datanode to send a heartbeat to every namenode in the
   * cluster, waiting up to timeoutMillis for each one.
   */
  public void waitForDNHeartbeat(int dnIndex, long timeoutMillis)
    throws IOException, InterruptedException {
    DataNode dn = getDataNodes().get(dnIndex);
    for (int i = 0; i<nameNodes.length; i++) {
      waitForDNHeartbeat(dn, timeoutMillis, i);
    }
  }
View Full Code Here
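
In this federated MiniDFSCluster variant the overload above simply fans out to one wait per namenode. A test would usually call it right after restarting a DataNode and before asserting anything that depends on a fresh heartbeat; the index and timeout below are illustrative:

  // Wait up to 30 seconds for DataNode 0 to heartbeat to every namenode.
  cluster.waitForDNHeartbeat(0, 30 * 1000);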

    createFederatedNameNode(nnIndex, conf, numDataNodes, true, true,
        null, nameserviceId);

    // Refresh datanodes with the newly started namenode
    for (DataNodeProperties dn : dataNodes) {
      DataNode datanode = dn.datanode;
      datanode.refreshNamenodes(conf);
    }

    // Wait for new namenode to get registrations from all the datanodes
    waitActive(true, nnIndex);
    return nameNodes[nnIndex].nameNode;
View Full Code Here

      nextBlockOutputStreamMethod.setAccessible(true);
      DatanodeInfo[] nodes = (DatanodeInfo[]) nextBlockOutputStreamMethod
          .invoke(out.getWrappedStream(), dfs.dfs.getClientName());

      // get data node
      DataNode datanode = cluster.getDataNode(nodes[0].getIpcPort());
      assertTrue(datanode != null);

      // locate the last block and its on-disk data and checksum (meta) file paths
      LocatedBlockWithMetaInfo locatedblock = TestInterDatanodeProtocol
          .getLastLocatedBlock(dfs.dfs.namenode, filestr);
      Block lastblock = locatedblock.getBlock();
      DataNode.LOG.info("newblocks=" + lastblock);
      BlockPathInfo blockPathInfo = datanode.getBlockPathInfo(lastblock);
      String blockPath = blockPathInfo.getBlockPath();
      String metaPath = blockPathInfo.getMetaPath();

      File f = new File(blockPath);
      File meta = new File(metaPath);
      assertEquals(0, f.length());
      // truncate the checksum (meta) file to zero length
      meta.delete();
      DataOutputStream outs = new DataOutputStream(new FileOutputStream(
          metaPath, false));
      outs.close();

      // issue lease recovery and make sure it succeeds.
      int numTries = 500;
      for (int idxTry = 0; idxTry < numTries; idxTry++) {
        boolean success = dfs.recoverLease(filepath);
        if (success) {
          break;
        } else if (idxTry == numTries - 1) {
          TestCase.fail("Lease recovery failed");
        } else {
          Thread.sleep(10);
        }
      }

      // make sure the meta file is still empty
      locatedblock = TestInterDatanodeProtocol.getLastLocatedBlock(
          dfs.dfs.namenode, filestr);
      Block newBlock = locatedblock.getBlock();
      blockPathInfo = datanode.getBlockPathInfo(newBlock);
      assertEquals(0, blockPathInfo.getNumBytes());
      metaPath = blockPathInfo.getMetaPath();
      meta = new File(metaPath);
      assertEquals(0, meta.length());
View Full Code Here
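
The retry loop in the snippet above is the usual lease-recovery idiom: DistributedFileSystem.recoverLease returns false until recovery has actually completed, so callers poll it. A compact, hedged restatement of the same pattern (variable names are illustrative):

  // Poll recoverLease(Path) until it reports completion, sleeping briefly
  // between attempts, and fail the test if it never succeeds.
  boolean recovered = false;
  for (int attempt = 0; attempt < 500 && !recovered; attempt++) {
    recovered = dfs.recoverLease(filepath);
    if (!recovered) {
      Thread.sleep(10);
    }
  }
  assertTrue("lease was not recovered in time", recovered);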

      MiniDFSCluster miniCluster, DFSClient dfsclient) throws IOException {
    LocatedBlocks locations = dfsclient.namenode.getBlockLocations(filename, 0,
        Long.MAX_VALUE);
    assertEquals(1, locations.locatedBlockCount());
    LocatedBlock locatedblock = locations.getLocatedBlocks().get(0);
    DataNode datanode = miniCluster.getDataNode(locatedblock.getLocations()[0]
        .getIpcPort());
    assertTrue(datanode != null);

    Block lastblock = locatedblock.getBlock();
    DataNode.LOG.info("newblocks=" + lastblock);

    return datanode.getBlockPathInfo(lastblock);
  }
View Full Code Here
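
The helper above resolves a single-block file to the DataNode that stores it and returns the replica's on-disk paths. A typical follow-up check in a test looks like the sketch below; the helper's name is not visible in the snippet, so it is assumed here:

  // Verify that both the block data file and its checksum (meta) file exist
  // on the DataNode's local disk for the file under test.
  BlockPathInfo info = getBlockPathInfo(filestr, cluster, dfs.dfs);
  assertTrue(new File(info.getBlockPath()).exists());
  assertTrue(new File(info.getMetaPath()).exists());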

      DataNode.LOG.info("newblocksizes = " + Arrays.asList(newblocksizes));

      //update blocks with random block sizes
      Block[] newblocks = new Block[REPLICATION_NUM];
      for(int i = 0; i < REPLICATION_NUM; i++) {
        DataNode dn = datanodes[i];
        FSDatasetTestUtil.truncateBlock(dn, lastblock, newblocksizes[i], namespaceId);
        newblocks[i] = new Block(lastblock.getBlockId(), newblocksizes[i],
            lastblock.getGenerationStamp());
        checkMetaInfo(namespaceId, newblocks[i], idps[i]);
      }

      DataNode.LOG.info("dfs.dfs.clientName=" + dfs.dfs.clientName);
      cluster.getNameNode().append(filestr, dfs.dfs.clientName);

      //block synchronization
      final int primarydatanodeindex = AppendTestUtil.nextInt(datanodes.length);
      DataNode.LOG.info("primarydatanodeindex  =" + primarydatanodeindex);
      DataNode primary = datanodes[primarydatanodeindex];
      DataNode.LOG.info("primary.dnRegistration=" + primary.getDNRegistrationForNS(
          cluster.getNameNode().getNamespaceID()));
      primary.recoverBlocks(namespaceId, new Block[]{lastblock}, new DatanodeInfo[][]{datanodeinfos}).join();

      BlockMetaDataInfo[] updatedmetainfo = new BlockMetaDataInfo[REPLICATION_NUM];
      int minsize = min(newblocksizes);
      long currentGS = cluster.getNameNode().namesystem.getGenerationStamp();
      lastblock.setGenerationStamp(currentGS);
View Full Code Here

                    len == fileSize);
    
        // Check storage usage
        // can't check capacities for real storage since the OS file system may be changing under us.
        if (simulatedStorage) {
          DataNode dn = cluster.getDataNodes().get(0);
          assertEquals(fileSize, dn.getFSDataset().getDfsUsed());
          assertEquals(SimulatedFSDataset.DEFAULT_CAPACITY-fileSize, dn.getFSDataset().getRemaining());
        }
      }
    } finally {
      cluster.shutdown();
    }
View Full Code Here
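
The simulatedStorage branch above is only meaningful when the cluster was started with the in-memory simulated dataset, which makes capacity and usage deterministic. A hedged sketch of how a test enables it (SimulatedFSDataset.CONFIG_PROPERTY_SIMULATED is the key used by HDFS tests of this era, but confirm it against the version at hand):

  // Use simulated block storage so getDfsUsed()/getRemaining() are exact and
  // unaffected by the host file system.
  Configuration conf = new Configuration();
  conf.setBoolean(SimulatedFSDataset.CONFIG_PROPERTY_SIMULATED, true);
  MiniDFSCluster cluster = new MiniDFSCluster(conf, 1, true, null);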

      assertEquals(1, locations.locatedBlockCount());
      LocatedBlock locatedblock = locations.getLocatedBlocks().get(0);
      int successcount = 0;
      int nsId = cluster.getNameNode().getNamespaceID();
      for(DatanodeInfo datanodeinfo: locatedblock.getLocations()) {
        DataNode datanode = cluster.getDataNode(datanodeinfo.ipcPort);
        FSDataset dataset = (FSDataset)datanode.data;
        Block b = dataset.getStoredBlock(nsId, locatedblock.getBlock().getBlockId());
        File blockfile = dataset.findBlockFile(nsId, b.getBlockId());
        System.out.println("blockfile=" + blockfile);
        if (blockfile != null) {
View Full Code Here
