Examples of BlockManager


Examples of org.apache.hadoop.hdfs.server.blockmanagement.BlockManager
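A Mockito-based test fixture, apparently in the style of TestFsck: FSNamesystem, BlockManager, and DatanodeManager are mocked so a NamenodeFsck can be constructed without a live cluster, with getBlockLocations() stubbed to throw FileNotFoundException for any path.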

    Map<String,String[]> pmap = new HashMap<String, String[]>();
    Writer result = new StringWriter();
    PrintWriter out = new PrintWriter(result, true);
    InetAddress remoteAddress = InetAddress.getLocalHost();
    FSNamesystem fsName = mock(FSNamesystem.class);
    BlockManager blockManager = mock(BlockManager.class);
    DatanodeManager dnManager = mock(DatanodeManager.class);
   
    when(namenode.getNamesystem()).thenReturn(fsName);
    when(fsName.getBlockLocations(anyString(), anyLong(), anyLong(),
        anyBoolean(), anyBoolean(), anyBoolean())).
        thenThrow(new FileNotFoundException());
    when(fsName.getBlockManager()).thenReturn(blockManager);
    when(blockManager.getDatanodeManager()).thenReturn(dnManager);

    NamenodeFsck fsck = new NamenodeFsck(conf, namenode, nettop, pmap, out,
        NUM_REPLICAS, (short)1, remoteAddress);

    String pathString = "/tmp/testFile";
View Full Code Here

Examples of org.apache.hadoop.hdfs.server.blockmanagement.BlockManager
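The NameNode side of datanode block reports, apparently from NameNodeRpcServer.blockReport(): each StorageBlockReport is decoded into a BlockListAsLongs and handed to BlockManager.processReport(), bumping a metrics counter per storage.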

    verifyRequest(nodeReg);
    if(blockStateChangeLog.isDebugEnabled()) {
      blockStateChangeLog.debug("*BLOCK* NameNode.blockReport: "
           + "from " + nodeReg + ", reports.length=" + reports.length);
    }
    final BlockManager bm = namesystem.getBlockManager();
    boolean noStaleStorages = false;
    for(StorageBlockReport r : reports) {
      final BlockListAsLongs blocks = new BlockListAsLongs(r.getBlocks());
      //
      // BlockManager.processReport accumulates information of prior calls
      // for the same node and storage, so the value returned by the last
      // call of this loop is the final updated value for noStaleStorages.
      //
      noStaleStorages = bm.processReport(nodeReg, r.getStorage(), poolId, blocks);
      metrics.incrStorageBlockReportOps();
    }

    if (nn.getFSImage().isUpgradeFinalized() &&
        !nn.isStandbyState() &&
View Full Code Here

Examples of org.apache.hadoop.hdfs.server.blockmanagement.BlockManager
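FSNamesystem startup: the namesystem builds its BlockManager (passing itself for both constructor arguments), pulls DatanodeStatistics from the BlockManager's DatanodeManager, and seeds a SequentialBlockIdGenerator.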

    try {
      resourceRecheckInterval = conf.getLong(
          DFS_NAMENODE_RESOURCE_CHECK_INTERVAL_KEY,
          DFS_NAMENODE_RESOURCE_CHECK_INTERVAL_DEFAULT);

      this.blockManager = new BlockManager(this, this, conf);
      this.datanodeStatistics = blockManager.getDatanodeManager().getDatanodeStatistics();
      this.blockIdGenerator = new SequentialBlockIdGenerator(this.blockManager);

      this.fsOwner = UserGroupInformation.getCurrentUser();
      this.fsOwnerShortUserName = fsOwner.getShortUserName();
View Full Code Here

Examples of org.apache.hadoop.hdfs.server.blockmanagement.BlockManager
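A near-identical initialization path from another Hadoop revision; this variant goes on to read the superuser group from the configuration rather than creating a block-ID generator.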

    try {
      resourceRecheckInterval = conf.getLong(
          DFS_NAMENODE_RESOURCE_CHECK_INTERVAL_KEY,
          DFS_NAMENODE_RESOURCE_CHECK_INTERVAL_DEFAULT);

      this.blockManager = new BlockManager(this, this, conf);
      this.datanodeStatistics = blockManager.getDatanodeManager().getDatanodeStatistics();

      this.fsOwner = UserGroupInformation.getCurrentUser();
      this.supergroup = conf.get(DFS_PERMISSIONS_SUPERUSERGROUP_KEY,
                                 DFS_PERMISSIONS_SUPERUSERGROUP_DEFAULT);
View Full Code Here

Examples of org.apache.hadoop.hdfs.server.blockmanagement.BlockManager
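WebHDFS datanode selection: for CREATE the BlockManager's BlockPlacementPolicy chooses a target near the client; for OPEN, GETFILECHECKSUM, and APPEND a datanode holding a replica is located via getBlockLocations(); failing either, a random node is drawn from the network topology.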

  static DatanodeInfo chooseDatanode(final NameNode namenode,
      final String path, final HttpOpParam.Op op, final long openOffset,
      final long blocksize, Configuration conf) throws IOException {
    final BlockManager bm = namenode.getNamesystem().getBlockManager();

    if (op == PutOpParam.Op.CREATE) {
      // choose a datanode close to the client
      final DatanodeDescriptor clientNode = bm.getDatanodeManager(
          ).getDatanodeByHost(getRemoteAddress());
      if (clientNode != null) {
        final DatanodeDescriptor[] datanodes = bm.getBlockPlacementPolicy()
            .chooseTarget(path, 1, clientNode,
                new ArrayList<DatanodeDescriptor>(), false, null, blocksize);
        if (datanodes.length > 0) {
          return datanodes[0];
        }
      }
    } else if (op == GetOpParam.Op.OPEN
        || op == GetOpParam.Op.GETFILECHECKSUM
        || op == PostOpParam.Op.APPEND) {
      // choose a datanode containing a replica
      final NamenodeProtocols np = namenode.getRpcServer();
      final HdfsFileStatus status = np.getFileInfo(path);
      if (status == null) {
        throw new FileNotFoundException("File " + path + " not found.");
      }
      final long len = status.getLen();
      if (op == GetOpParam.Op.OPEN) {
        if (openOffset < 0L || (openOffset >= len && len > 0)) {
          throw new IOException("Offset=" + openOffset
              + " out of the range [0, " + len + "); " + op + ", path=" + path);
        }
      }

      if (len > 0) {
        final long offset = op == GetOpParam.Op.OPEN ? openOffset : len - 1;
        final LocatedBlocks locations = np.getBlockLocations(path, offset, 1);
        final int count = locations.locatedBlockCount();
        if (count > 0) {
          return JspHelper.bestNode(locations.get(0).getLocations(), false, conf);
        }
      }
    }

    return (DatanodeDescriptor)bm.getDatanodeManager().getNetworkTopology(
        ).chooseRandom(NodeBase.ROOT);
  }
View Full Code Here

Examples of org.apache.hadoop.hdfs.server.blockmanagement.BlockManager
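An image-loading helper: every block of a freshly loaded INodeFile is registered with the BlockManager through addBlockCollection(), which returns the canonical BlockInfo to store back into the file.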

    public void updateBlocksMap(INodeFile file) {
      // Add file->block mapping
      final BlockInfo[] blocks = file.getBlocks();
      if (blocks != null) {
        final BlockManager bm = namesystem.getBlockManager();
        for (int i = 0; i < blocks.length; i++) {
          file.setBlock(i, bm.addBlockCollection(blocks[i], file));
        }
      }
    }
View Full Code Here

Examples of org.apache.hadoop.hdfs.server.blockmanagement.BlockManager
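A later revision of the WebHDFS chooseDatanode() above: the placement policy now returns DatanodeStorageInfo[] and takes a StorageType, so the selected storage is mapped back to its DatanodeDescriptor.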

  @VisibleForTesting
  static DatanodeInfo chooseDatanode(final NameNode namenode,
      final String path, final HttpOpParam.Op op, final long openOffset,
      final long blocksize) throws IOException {
    final BlockManager bm = namenode.getNamesystem().getBlockManager();

    if (op == PutOpParam.Op.CREATE) {
      // choose a datanode close to the client
      final DatanodeDescriptor clientNode = bm.getDatanodeManager(
          ).getDatanodeByHost(getRemoteAddress());
      if (clientNode != null) {
        final DatanodeStorageInfo[] storages = bm.getBlockPlacementPolicy()
            .chooseTarget(path, 1, clientNode,
                new ArrayList<DatanodeStorageInfo>(), false, null, blocksize,
                // TODO: get storage type from the file
                StorageType.DEFAULT);
        if (storages.length > 0) {
          return storages[0].getDatanodeDescriptor();
        }
      }
    } else if (op == GetOpParam.Op.OPEN
        || op == GetOpParam.Op.GETFILECHECKSUM
        || op == PostOpParam.Op.APPEND) {
      // choose a datanode containing a replica
      final NamenodeProtocols np = getRPCServer(namenode);
      final HdfsFileStatus status = np.getFileInfo(path);
      if (status == null) {
        throw new FileNotFoundException("File " + path + " not found.");
      }
      final long len = status.getLen();
      if (op == GetOpParam.Op.OPEN) {
        if (openOffset < 0L || (openOffset >= len && len > 0)) {
          throw new IOException("Offset=" + openOffset
              + " out of the range [0, " + len + "); " + op + ", path=" + path);
        }
      }

      if (len > 0) {
        final long offset = op == GetOpParam.Op.OPEN ? openOffset : len - 1;
        final LocatedBlocks locations = np.getBlockLocations(path, offset, 1);
        final int count = locations.locatedBlockCount();
        if (count > 0) {
          return bestNode(locations.get(0).getLocations());
        }
      }
    }

    return (DatanodeDescriptor)bm.getDatanodeManager().getNetworkTopology(
        ).chooseRandom(NodeBase.ROOT);
  }
View Full Code Here

Examples of org.apache.hadoop.hdfs.server.blockmanagement.BlockManager
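Another FSNamesystem startup excerpt: a NameNodeResourceChecker is installed, the BlockManager and DatanodeStatistics are wired up, and a fair ReentrantReadWriteLock becomes the namesystem lock.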

    resourceRecheckInterval = conf.getLong(
        DFS_NAMENODE_RESOURCE_CHECK_INTERVAL_KEY,
        DFS_NAMENODE_RESOURCE_CHECK_INTERVAL_DEFAULT);
    nnResourceChecker = new NameNodeResourceChecker(conf);
    checkAvailableResources();
    this.systemStart = now();
    this.blockManager = new BlockManager(this, this, conf);
    this.datanodeStatistics = blockManager.getDatanodeManager().getDatanodeStatistics();
    this.fsLock = new ReentrantReadWriteLock(true); // fair locking
    setConfigurationParameters(conf);
    // For testing purposes, allow the DT secret manager to be started regardless
    // of whether security is enabled.
View Full Code Here

Examples of org.apache.hadoop.hdfs.server.blockmanagement.BlockManager
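An older FSNamesystem(FSImage, Configuration) constructor: the BlockManager is created before the FSDirectory and the delegation-token secret manager.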

   * dirs is a list of directories where the filesystem directory state
   * is stored
   */
  FSNamesystem(FSImage fsImage, Configuration conf) throws IOException {
    this.fsLock = new ReentrantReadWriteLock(true);
    this.blockManager = new BlockManager(this, this, conf);
    setConfigurationParameters(conf);
    this.dir = new FSDirectory(fsImage, this, conf);
    dtSecretManager = createDelegationTokenSecretManager(conf);
  }
View Full Code Here

Examples of org.apache.hadoop.hdfs.server.blockmanagement.BlockManager
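A MiniDFSCluster test, apparently TestMissingBlocksAlert: a replica is corrupted on disk, a client read reports the corruption to the NameNode, and the BlockManager's getUnderReplicatedNotMissingBlocks() count is checked alongside the web UI's missing-blocks warning.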

      //start a cluster with single datanode
      cluster = new MiniDFSCluster.Builder(conf).build();
      cluster.waitActive();

      final BlockManager bm = cluster.getNamesystem().getBlockManager();
      DistributedFileSystem dfs =
          cluster.getFileSystem();

      // create a normal file
      DFSTestUtil.createFile(dfs, new Path("/testMissingBlocksAlert/file1"),
                             fileLen, (short)3, 0);

      Path corruptFile = new Path("/testMissingBlocks/corruptFile");
      DFSTestUtil.createFile(dfs, corruptFile, fileLen, (short)3, 0);


      // Corrupt the block
      ExtendedBlock block = DFSTestUtil.getFirstBlock(dfs, corruptFile);
      assertTrue(TestDatanodeBlockScanner.corruptReplica(block, 0));

      // read the file so that the corrupt block is reported to NN
      FSDataInputStream in = dfs.open(corruptFile);
      try {
        in.readFully(new byte[fileLen]);
      } catch (ChecksumException ignored) { // checksum error is expected.     
      }
      in.close();

      LOG.info("Waiting for missing blocks count to increase...");

      while (dfs.getMissingBlocksCount() <= 0) {
        Thread.sleep(100);
      }
      assertEquals(1, dfs.getMissingBlocksCount());
      assertEquals(4, dfs.getUnderReplicatedBlocksCount());
      assertEquals(3, bm.getUnderReplicatedNotMissingBlocks());


      // Now verify that it shows up on webui
      URL url = new URL("http://" + conf.get(DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY) +
                        "/dfshealth.jsp");
      String dfsFrontPage = DFSTestUtil.urlGet(url);
      String warnStr = "WARNING : There are ";
      assertTrue("HDFS Front page does not contain expected warning",
                 dfsFrontPage.contains(warnStr + "1 missing blocks"));

      // now do the reverse: remove the file and expect the number of missing
      // blocks to go back to zero

      dfs.delete(corruptFile, true);

      LOG.info("Waiting for missing blocks count to be zero...");
      while (dfs.getMissingBlocksCount() > 0) {
        Thread.sleep(100);
      }

      assertEquals(2, dfs.getUnderReplicatedBlocksCount());
      assertEquals(2, bm.getUnderReplicatedNotMissingBlocks());

      // and make sure the WARNING disappears from the web UI
      dfsFrontPage = DFSTestUtil.urlGet(url);
      assertFalse("HDFS Front page contains unexpected warning",
View Full Code Here
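
For reference, here is a minimal, self-contained sketch of obtaining a BlockManager handle in a test. It assumes only the MiniDFSCluster calls already shown in the excerpts above (Builder, waitActive(), getNamesystem().getBlockManager(), getUnderReplicatedNotMissingBlocks()), plus HdfsConfiguration and shutdown(), which are standard HDFS test APIs not shown here.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hdfs.HdfsConfiguration;
    import org.apache.hadoop.hdfs.MiniDFSCluster;
    import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager;

    public class BlockManagerSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = new HdfsConfiguration();
        // single-datanode in-process cluster, as in the test excerpt above
        MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
        try {
          cluster.waitActive();
          // the BlockManager hangs off the NameNode's FSNamesystem
          BlockManager bm = cluster.getNamesystem().getBlockManager();
          System.out.println("under-replicated (not missing) blocks: "
              + bm.getUnderReplicatedNotMissingBlocks());
        } finally {
          cluster.shutdown();
        }
      }
    }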