Examples of BlockManager


Examples of org.apache.hadoop.hdfs.server.blockmanagement.BlockManager

    conf = new HdfsConfiguration();
    conf.set(DFSConfigKeys.NET_TOPOLOGY_SCRIPT_FILE_NAME_KEY,
        "need to set a dummy value here so it assumes a multi-rack cluster");
    fsn = Mockito.mock(FSNamesystem.class);
    Mockito.doReturn(true).when(fsn).hasWriteLock();
    bm = new BlockManager(fsn, fsn, conf);
  }
View Full Code Here
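
The excerpt above is only the body of a test fixture; below is a minimal, self-contained sketch of the same wiring, assuming a JUnit 4 test class. The class name, field declarations and imports are illustrative additions, not part of the original source.

    // Sketch only: imports and class skeleton are assumed; the setUp body mirrors the snippet above.
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hdfs.DFSConfigKeys;
    import org.apache.hadoop.hdfs.HdfsConfiguration;
    import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager;
    import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
    import org.junit.Before;
    import org.mockito.Mockito;

    public class BlockManagerSetupSketch {          // hypothetical class name
      private Configuration conf;
      private FSNamesystem fsn;
      private BlockManager bm;

      @Before
      public void setUp() throws Exception {
        conf = new HdfsConfiguration();
        // any non-empty value for the topology script makes the cluster look multi-rack
        conf.set(DFSConfigKeys.NET_TOPOLOGY_SCRIPT_FILE_NAME_KEY,
            "need to set a dummy value here so it assumes a multi-rack cluster");
        fsn = Mockito.mock(FSNamesystem.class);           // no real NameNode needed
        Mockito.doReturn(true).when(fsn).hasWriteLock();  // satisfy internal write-lock checks
        bm = new BlockManager(fsn, fsn, conf);            // the mock serves as both constructor arguments
      }
    }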

Examples of org.apache.hadoop.hdfs.server.blockmanagement.BlockManager

      //start a cluster with single datanode
      cluster = new MiniDFSCluster.Builder(conf).build();
      cluster.waitActive();

      final BlockManager bm = cluster.getNamesystem().getBlockManager();
      DistributedFileSystem dfs =
                            (DistributedFileSystem) cluster.getFileSystem();

      // create a normal file
      DFSTestUtil.createFile(dfs, new Path("/testMissingBlocksAlert/file1"),
                             fileLen, (short)3, 0);

      Path corruptFile = new Path("/testMissingBlocks/corruptFile");
      DFSTestUtil.createFile(dfs, corruptFile, fileLen, (short)3, 0);


      // Corrupt the block
      ExtendedBlock block = DFSTestUtil.getFirstBlock(dfs, corruptFile);
      assertTrue(TestDatanodeBlockScanner.corruptReplica(block, 0));

      // read the file so that the corrupt block is reported to NN
      FSDataInputStream in = dfs.open(corruptFile);
      try {
        in.readFully(new byte[fileLen]);
      } catch (ChecksumException ignored) { // checksum error is expected.     
      }
      in.close();

      LOG.info("Waiting for missing blocks count to increase...");

      while (dfs.getMissingBlocksCount() <= 0) {
        Thread.sleep(100);
      }
      assertTrue(dfs.getMissingBlocksCount() == 1);
      assertEquals(4, dfs.getUnderReplicatedBlocksCount());
      assertEquals(3, bm.getUnderReplicatedNotMissingBlocks());


      // Now verify that it shows up on webui
      URL url = new URL("http://" + conf.get(DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY) +
                        "/dfshealth.jsp");
      String dfsFrontPage = DFSTestUtil.urlGet(url);
      String warnStr = "WARNING : There are ";
      assertTrue("HDFS Front page does not contain expected warning",
                 dfsFrontPage.contains(warnStr + "1 missing blocks"));

      // now do the reverse : remove the file expect the number of missing
      // blocks to go to zero

      dfs.delete(corruptFile, true);

      LOG.info("Waiting for missing blocks count to be zero...");
      while (dfs.getMissingBlocksCount() > 0) {
        Thread.sleep(100);
      }

      assertEquals(2, dfs.getUnderReplicatedBlocksCount());
      assertEquals(2, bm.getUnderReplicatedNotMissingBlocks());

      // and make sure the WARNING no longer shows up on the webui
      dfsFrontPage = DFSTestUtil.urlGet(url);
      assertFalse("HDFS Front page contains unexpected warning",
View Full Code Here
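
A note on the two polling loops in this excerpt: they spin with no deadline, so a broken cluster hangs the test. A hedged alternative is a bounded wait via org.apache.hadoop.test.GenericTestUtils.waitFor from the HDFS test classpath; the 30-second budget below is an assumption of this sketch, the dfs handle must be (effectively) final to be captured, and the Supplier import differs across Hadoop versions (Guava in older lines, java.util.function in newer).

    // Sketch: bounded wait for the missing-blocks metric instead of sleeping forever.
    // The caller must declare or handle TimeoutException and InterruptedException.
    GenericTestUtils.waitFor(new Supplier<Boolean>() {
      @Override
      public Boolean get() {
        try {
          return dfs.getMissingBlocksCount() == 1;
        } catch (IOException e) {
          return false;   // keep polling on transient RPC failures
        }
      }
    }, 100, 30000);       // check every 100 ms, give up after 30 s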

Examples of org.apache.hadoop.hdfs.server.blockmanagement.BlockManager

  @Test
  public void testRetryAddBlockWhileInChooseTarget() throws Exception {
    final String src = "/testRetryAddBlockWhileInChooseTarget";

    FSNamesystem ns = cluster.getNamesystem();
    BlockManager spyBM = spy(ns.getBlockManager());
    final NamenodeProtocols nn = cluster.getNameNodeRpc();

    // substitute mocked BlockManager into FSNamesystem
    Class<? extends FSNamesystem> nsClass = ns.getClass();
    Field bmField = nsClass.getDeclaredField("blockManager");
View Full Code Here
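
The excerpt stops right after looking up the private blockManager field. Below is a hypothetical continuation sketch of the usual reflection-based swap; only the field name comes from the getDeclaredField call above, the rest is assumed.

    // Hypothetical sketch: inject the Mockito spy so FSNamesystem delegates to it.
    bmField.setAccessible(true);   // the field is private in FSNamesystem
    bmField.set(ns, spyBM);
    // From here on, individual BlockManager calls can be stubbed or verified,
    // e.g. to delay target choice and provoke the retried addBlock the test name describes.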

Examples of org.apache.hadoop.hdfs.server.blockmanagement.BlockManager

        @Override
        public Object run() throws Exception {
          NameNode nn = NameNodeHttpServer.getNameNodeFromContext(context);
         
          final FSNamesystem namesystem = nn.getNamesystem();
          final BlockManager bm = namesystem.getBlockManager();
          final int totalDatanodes =
              namesystem.getNumberOfDatanodes(DatanodeReportType.LIVE);
          new NamenodeFsck(conf, nn,
              bm.getDatanodeManager().getNetworkTopology(), pmap, out,
              totalDatanodes, bm.minReplication, remoteAddress).fsck();
         
          return null;
        }
      });
View Full Code Here

Examples of org.apache.hadoop.hdfs.server.blockmanagement.BlockManager

    verifyRequest(nodeReg);
    if(blockStateChangeLog.isDebugEnabled()) {
      blockStateChangeLog.debug("*BLOCK* NameNode.blockReport: "
           + "from " + nodeReg + ", reports.length=" + reports.length);
    }
    final BlockManager bm = namesystem.getBlockManager();
    boolean hasStaleStorages = true;
    for(StorageBlockReport r : reports) {
      final BlockListAsLongs blocks = new BlockListAsLongs(r.getBlocks());
      // note: overwritten on each iteration, so only the last storage report
      // decides whether any stale storages remain
      hasStaleStorages = bm.processReport(nodeReg, r.getStorage(), poolId, blocks);
    }

    if (nn.getFSImage().isUpgradeFinalized() &&
        !nn.isStandbyState() &&
        !hasStaleStorages) {
View Full Code Here

Examples of org.apache.hadoop.hdfs.server.blockmanagement.BlockManager

        DFS_NAMENODE_RESOURCE_CHECK_INTERVAL_KEY,
        DFS_NAMENODE_RESOURCE_CHECK_INTERVAL_DEFAULT);
    nnResourceChecker = new NameNodeResourceChecker(conf);
    checkAvailableResources();
    this.systemStart = now();
    this.blockManager = new BlockManager(this, this, conf);
    this.datanodeStatistics = blockManager.getDatanodeManager().getDatanodeStatistics();
    this.fsLock = new ReentrantReadWriteLock(true); // fair locking
    setConfigurationParameters(conf);
    dtSecretManager = createDelegationTokenSecretManager(conf);
    this.registerMBean(); // register the MBean for the FSNamesystemState
View Full Code Here

Examples of org.apache.hadoop.hdfs.server.blockmanagement.BlockManager

   * dirs is a list of directories where the filesystem directory state
   * is stored
   */
  FSNamesystem(FSImage fsImage, Configuration conf) throws IOException {
    this.fsLock = new ReentrantReadWriteLock(true);
    this.blockManager = new BlockManager(this, this, conf);
    setConfigurationParameters(conf);
    this.dir = new FSDirectory(fsImage, this, conf);
    dtSecretManager = createDelegationTokenSecretManager(conf);
  }
View Full Code Here

Examples of org.apache.hadoop.hdfs.server.blockmanagement.BlockManager

  }

  static DatanodeInfo chooseDatanode(final NameNode namenode,
      final String path, final HttpOpParam.Op op, final long openOffset,
      final long blocksize, Configuration conf) throws IOException {
    final BlockManager bm = namenode.getNamesystem().getBlockManager();

    if (op == PutOpParam.Op.CREATE) {
      //choose a datanode near to client
      final DatanodeDescriptor clientNode = bm.getDatanodeManager(
          ).getDatanodeByHost(getRemoteAddress());
      if (clientNode != null) {
        final DatanodeDescriptor[] datanodes = bm.getBlockPlacementPolicy(
            ).chooseTarget(path, 1, clientNode, null, blocksize);
        if (datanodes.length > 0) {
          return datanodes[0];
        }
      }
    } else if (op == GetOpParam.Op.OPEN
        || op == GetOpParam.Op.GETFILECHECKSUM
        || op == PostOpParam.Op.APPEND) {
      //choose a datanode containing a replica
      final NamenodeProtocols np = namenode.getRpcServer();
      final HdfsFileStatus status = np.getFileInfo(path);
      if (status == null) {
        throw new FileNotFoundException("File " + path + " not found.");
      }
      final long len = status.getLen();
      if (op == GetOpParam.Op.OPEN) {
        if (openOffset < 0L || (openOffset >= len && len > 0)) {
          throw new IOException("Offset=" + openOffset
              + " out of the range [0, " + len + "); " + op + ", path=" + path);
        }
      }

      if (len > 0) {
        final long offset = op == GetOpParam.Op.OPEN? openOffset: len - 1;
        final LocatedBlocks locations = np.getBlockLocations(path, offset, 1);
        final int count = locations.locatedBlockCount();
        if (count > 0) {
          return JspHelper.bestNode(locations.get(0).getLocations(), false, conf);
        }
      }
    }

    return (DatanodeDescriptor)bm.getDatanodeManager().getNetworkTopology(
        ).chooseRandom(NodeBase.ROOT);
  }
View Full Code Here

Examples of org.apache.hadoop.hdfs.server.blockmanagement.BlockManager

    try {
      resourceRecheckInterval = conf.getLong(
          DFS_NAMENODE_RESOURCE_CHECK_INTERVAL_KEY,
          DFS_NAMENODE_RESOURCE_CHECK_INTERVAL_DEFAULT);

      this.blockManager = new BlockManager(this, this, conf);
      this.datanodeStatistics = blockManager.getDatanodeManager().getDatanodeStatistics();

      this.fsOwner = UserGroupInformation.getCurrentUser();
      this.fsOwnerShortUserName = fsOwner.getShortUserName();
      this.supergroup = conf.get(DFS_PERMISSIONS_SUPERUSERGROUP_KEY,
View Full Code Here

Examples of org.apache.hadoop.hdfs.server.blockmanagement.BlockManager

        DFS_NAMENODE_RESOURCE_CHECK_INTERVAL_KEY,
        DFS_NAMENODE_RESOURCE_CHECK_INTERVAL_DEFAULT);
    nnResourceChecker = new NameNodeResourceChecker(conf);
    checkAvailableResources();
    this.systemStart = now();
    this.blockManager = new BlockManager(this, this, conf);
    this.datanodeStatistics = blockManager.getDatanodeManager().getDatanodeStatistics();
    this.fsLock = createFsLock(conf);
    setConfigurationParameters(conf);
    // For testing purposes, allow the DT secret manager to be started regardless
    // of whether security is enabled.
View Full Code Here