Package org.apache.hadoop.hdfs.server.protocol

Examples of org.apache.hadoop.hdfs.server.protocol.DatanodeStorage
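
Every snippet on this page follows the same pattern: look up the DataNode's registration for the file's block pool, wrap the registration's storage ID in a DatanodeStorage, package the block list as a StorageBlockReport, and deliver it to the NameNode through the blockReport RPC. The sketch below pulls that pattern into one self-contained helper; the MiniDFSCluster, the DataNode index, and the block list are assumed to come from the surrounding test, and the helper name sendBlockReport is made up for illustration (assuming the same Hadoop-era test APIs used in the snippets).

    import java.io.IOException;
    import java.util.List;

    import org.apache.hadoop.hdfs.MiniDFSCluster;
    import org.apache.hadoop.hdfs.protocol.Block;
    import org.apache.hadoop.hdfs.protocol.BlockListAsLongs;
    import org.apache.hadoop.hdfs.server.datanode.DataNode;
    import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration;
    import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage;
    import org.apache.hadoop.hdfs.server.protocol.StorageBlockReport;

    // Hypothetical helper: report the given blocks for one DataNode of the cluster.
    static void sendBlockReport(MiniDFSCluster cluster, int dnIndex,
        List<Block> blocks) throws IOException {
      // all blocks belong to the same file, hence same block pool
      DataNode dn = cluster.getDataNodes().get(dnIndex);
      String poolId = cluster.getNamesystem().getBlockPoolId();
      DatanodeRegistration dnR = dn.getDNRegistrationForBP(poolId);

      // DatanodeStorage is keyed by the storage ID carried in the registration
      StorageBlockReport[] report = { new StorageBlockReport(
          new DatanodeStorage(dnR.getStorageID()),
          new BlockListAsLongs(blocks, null).getBlockListAsLongs()) };

      // send the per-storage report to the NameNode over the DatanodeProtocol RPC
      cluster.getNameNodeRpc().blockReport(dnR, poolId, report);
    }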


    // all blocks belong to the same file, hence same BP
    DataNode dn = cluster.getDataNodes().get(DN_N0);
    String poolId = cluster.getNamesystem().getBlockPoolId();
    DatanodeRegistration dnR = dn.getDNRegistrationForBP(poolId);
    StorageBlockReport[] report = { new StorageBlockReport(
        new DatanodeStorage(dnR.getStorageID()),
        new BlockListAsLongs(blocks, null).getBlockListAsLongs()) };
    cluster.getNameNodeRpc().blockReport(dnR, poolId, report);

    List<LocatedBlock> blocksAfterReport =
      DFSTestUtil.getAllBlocks(fs.open(filePath));


    // all blocks belong to the same file, hence same BP
    String poolId = cluster.getNamesystem().getBlockPoolId();
    DatanodeRegistration dnR = dn0.getDNRegistrationForBP(poolId);
    StorageBlockReport[] report = { new StorageBlockReport(
        new DatanodeStorage(dnR.getStorageID()),
        new BlockListAsLongs(blocks, null).getBlockListAsLongs()) };
    cluster.getNameNodeRpc().blockReport(dnR, poolId, report);

    BlockManagerTestUtil.getComputedDatanodeWork(cluster.getNamesystem()
        .getBlockManager());

    // all blocks belong to the same file, hence same BP
    DataNode dn = cluster.getDataNodes().get(DN_N0);
    String poolId = cluster.getNamesystem().getBlockPoolId();
    DatanodeRegistration dnR = dn.getDNRegistrationForBP(poolId);
    StorageBlockReport[] report = { new StorageBlockReport(
        new DatanodeStorage(dnR.getStorageID()),
        new BlockListAsLongs(blocks, null).getBlockListAsLongs()) };
    DatanodeCommand dnCmd =
      cluster.getNameNodeRpc().blockReport(dnR, poolId, report);
    if(LOG.isDebugEnabled()) {
      LOG.debug("Got the command: " + dnCmd);
    }

    // all blocks belong to the same file, hence same BP
    DataNode dn = cluster.getDataNodes().get(DN_N1);
    String poolId = cluster.getNamesystem().getBlockPoolId();
    DatanodeRegistration dnR = dn.getDNRegistrationForBP(poolId);
    StorageBlockReport[] report = { new StorageBlockReport(
        new DatanodeStorage(dnR.getStorageID()),
        new BlockListAsLongs(blocks, null).getBlockListAsLongs()) };
    cluster.getNameNodeRpc().blockReport(dnR, poolId, report);
    printStats();
    assertEquals("Wrong number of PendingReplication Blocks",
      0, cluster.getNamesystem().getUnderReplicatedBlocks());

    // all blocks belong to the same file, hence same BP
    DataNode dn = cluster.getDataNodes().get(DN_N1);
    String poolId = cluster.getNamesystem().getBlockPoolId();
    DatanodeRegistration dnR = dn.getDNRegistrationForBP(poolId);
    StorageBlockReport[] report = { new StorageBlockReport(
        new DatanodeStorage(dnR.getStorageID()),
        new BlockListAsLongs(blocks, null).getBlockListAsLongs()) };
    cluster.getNameNodeRpc().blockReport(dnR, poolId, report);
    printStats();
    assertEquals("Wrong number of Corrupted blocks",
      1, cluster.getNamesystem().getCorruptReplicaBlocks() +
// the following might have to be added into the equation if
// the same block could be in two different states at the same time
// and then the expected number has to be changed to '2'
//        cluster.getNamesystem().getPendingReplicationBlocks() +
        cluster.getNamesystem().getPendingDeletionBlocks());

    // Get another block and screw its length to be less than original
    if (randIndex == 0)
      randIndex++;
    else
      randIndex--;
    corruptedBlock = blocks.get(randIndex);
    corruptBlockLen(corruptedBlock);
    if(LOG.isDebugEnabled()) {
      LOG.debug("Done corrupting length of " + corruptedBlock.getBlockName());
    }
   
    report[0] = new StorageBlockReport(
        new DatanodeStorage(dnR.getStorageID()),
        new BlockListAsLongs(blocks, null).getBlockListAsLongs());
    cluster.getNameNodeRpc().blockReport(dnR, poolId, report);
    printStats();

    assertEquals("Wrong number of Corrupted blocks",

      // all blocks belong to the same file, hence same BP
      DataNode dn = cluster.getDataNodes().get(DN_N1);
      String poolId = cluster.getNamesystem().getBlockPoolId();
      DatanodeRegistration dnR = dn.getDNRegistrationForBP(poolId);
      StorageBlockReport[] report = { new StorageBlockReport(
          new DatanodeStorage(dnR.getStorageID()),
          new BlockListAsLongs(blocks, null).getBlockListAsLongs()) };
      cluster.getNameNodeRpc().blockReport(dnR, poolId, report);
      printStats();
      assertEquals("Wrong number of PendingReplication blocks",
        blocks.size(), cluster.getNamesystem().getPendingReplicationBlocks());

      // all blocks belong to the same file, hence same BP
      DataNode dn = cluster.getDataNodes().get(DN_N1);
      String poolId = cluster.getNamesystem().getBlockPoolId();
      DatanodeRegistration dnR = dn.getDNRegistrationForBP(poolId);
      StorageBlockReport[] report = { new StorageBlockReport(
          new DatanodeStorage(dnR.getStorageID()),
          new BlockListAsLongs(blocks, null).getBlockListAsLongs()) };
      cluster.getNameNodeRpc().blockReport(dnR, poolId, report);
      printStats();
      assertEquals("Wrong number of PendingReplication blocks",
        2, cluster.getNamesystem().getPendingReplicationBlocks());

    DataNode dn = cluster.getDataNodes().get(1); //corresponds to dir data3
    String bpid = cluster.getNamesystem().getBlockPoolId();
    DatanodeRegistration dnR = dn.getDNRegistrationForBP(bpid);
    final StorageBlockReport[] report = {
        new StorageBlockReport(
            new DatanodeStorage(dnR.getStorageID()),
            DataNodeTestUtils.getFSDataset(dn).getBlockReport(bpid
                ).getBlockListAsLongs())
    };
    cluster.getNameNodeRpc().blockReport(dnR, bpid, report);
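
Unlike the other snippets, the example above does not build the block list by hand; it asks the DataNode's own dataset for its current report. A minimal sketch of that variant, again with a hypothetical helper name and the same assumed test APIs (plus DataNodeTestUtils from org.apache.hadoop.hdfs.server.datanode), looks like this:

    // Hypothetical helper: report whatever the chosen DataNode actually holds.
    static void reportActualBlocks(MiniDFSCluster cluster, int dnIndex)
        throws IOException {
      DataNode dn = cluster.getDataNodes().get(dnIndex);
      String bpid = cluster.getNamesystem().getBlockPoolId();
      DatanodeRegistration dnR = dn.getDNRegistrationForBP(bpid);

      // pull the block report straight from the DataNode's dataset instead of
      // constructing a BlockListAsLongs from a caller-supplied list
      StorageBlockReport[] report = { new StorageBlockReport(
          new DatanodeStorage(dnR.getStorageID()),
          DataNodeTestUtils.getFSDataset(dn).getBlockReport(bpid)
              .getBlockListAsLongs()) };

      cluster.getNameNodeRpc().blockReport(dnR, bpid, report);
    }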

