Package org.apache.hadoop.hdfs.protocol

Examples of org.apache.hadoop.hdfs.protocol.BlockListAsLongs
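
BlockListAsLongs packs a datanode's replica list into a long[] so it can be shipped to the NameNode as a block report over DatanodeProtocol. The examples below show the two usual call sites: tests that hand-build a report from a list of Blocks, and datanode code that builds one report per storage volume. As a quick orientation, here is a minimal, self-contained sketch; it assumes the legacy long[]-based API used throughout this page and relies only on the constructor and accessors that appear in the snippets below, while the class name, block ids, lengths, and generation stamps are made up for illustration.

    import java.util.ArrayList;

    import org.apache.hadoop.hdfs.protocol.Block;
    import org.apache.hadoop.hdfs.protocol.BlockListAsLongs;

    public class BlockListAsLongsSketch {
      public static void main(String[] args) {
        // Two finalized replicas, each described by (blockId, numBytes, generationStamp).
        ArrayList<Block> finalized = new ArrayList<Block>();
        finalized.add(new Block(1L, 1024L, 1000L));
        finalized.add(new Block(2L, 2048L, 1001L));

        // The second argument is the list of under-construction replicas; none here.
        BlockListAsLongs report = new BlockListAsLongs(finalized, null);

        // The long[] encoding is the payload handed to the NameNode's blockReport() call.
        long[] encoded = report.getBlockListAsLongs();
        System.out.println("Blocks in report: " + report.getNumberOfBlocks()
            + ", encoded longs: " + encoded.length);
      }
    }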


      LOG.debug("Block " + b.getBlockName() + " after\t " + "Size " +
        b.getNumBytes());
    }
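    // Send the datanode's full block report straight to the NameNode.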
    cluster.getNameNode().blockReport(
      cluster.getDataNodes().get(DN_N0).dnRegistration,
      new BlockListAsLongs(blocks, null).getBlockListAsLongs());

    List<LocatedBlock> blocksAfterReport =
      DFSTestUtil.getAllBlocks(fs.open(filePath));

    LOG.debug("After mods: Number of blocks allocated " +
      blocksAfterReport.size());


    waitTil(DN_RESCAN_EXTRA_WAIT);

    cluster.getNameNode().blockReport(
      cluster.getDataNodes().get(DN_N0).dnRegistration,
      new BlockListAsLongs(blocks, null).getBlockListAsLongs());

    cluster.getNamesystem().computeDatanodeWork();

    printStats();

    // This new block is unknown to the NN and will be marked for deletion.
    blocks.add(new Block());
    DatanodeCommand dnCmd =
      cluster.getNameNode().blockReport(
        cluster.getDataNodes().get(DN_N0).dnRegistration,
        new BlockListAsLongs(blocks, null).getBlockListAsLongs());
    LOG.debug("Got the command: " + dnCmd);
    printStats();

    assertEquals("Wrong number of CorruptedReplica+PendingDeletion " +
      "blocks is found", 2,
      cluster.getNamesystem().getCorruptReplicaBlocks() +
        cluster.getNamesystem().getPendingDeletionBlocks());

    ArrayList<Block> blocks = writeFile(METHOD_NAME, FILE_SIZE, filePath);
    startDNandWait(filePath, true);

    cluster.getNameNode().blockReport(
      cluster.getDataNodes().get(DN_N1).dnRegistration,
      new BlockListAsLongs(blocks, null).getBlockListAsLongs());
    printStats();
    assertEquals("Wrong number of PendingReplication Blocks",
      0, cluster.getNamesystem().getUnderReplicatedBlocks());
  }

    LOG.debug("BlockGS after " + blocks.get(randIndex).getGenerationStamp());

    LOG.debug("Done corrupting GS of " + corruptedBlock.getBlockName());
    cluster.getNameNode().blockReport(
      cluster.getDataNodes().get(DN_N1).dnRegistration,
      new BlockListAsLongs(blocks, null).getBlockListAsLongs());
    printStats();
    assertEquals("Wrong number of Corrupted blocks",
      1, cluster.getNamesystem().getCorruptReplicaBlocks() +
// The following might have to be added to the sum if the same block
// could be in two different states at the same time; the expected
// count would then have to be changed to '2'.
//        cluster.getNamesystem().getPendingReplicationBlocks() +
        cluster.getNamesystem().getPendingDeletionBlocks());

    // Get another block and corrupt its length so it is shorter than the original
    if (randIndex == 0)
      randIndex++;
    else
      randIndex--;
    corruptedBlock = blocks.get(randIndex);
    corruptBlockLen(corruptedBlock);
    LOG.debug("Done corrupting length of " + corruptedBlock.getBlockName());
    cluster.getNameNode().blockReport(
      cluster.getDataNodes().get(DN_N1).dnRegistration,
      new BlockListAsLongs(blocks, null).getBlockListAsLongs());
    printStats();

    assertEquals("Wrong number of Corrupted blocks",
      2, cluster.getNamesystem().getCorruptReplicaBlocks() +
        cluster.getNamesystem().getPendingReplicationBlocks() +
        cluster.getNamesystem().getPendingDeletionBlocks());

      waitForTempReplica(bl, DN_N1);

      cluster.getNameNode().blockReport(
        cluster.getDataNodes().get(DN_N1).dnRegistration,
        new BlockListAsLongs(blocks, null).getBlockListAsLongs());
      printStats();
      assertEquals("Wrong number of PendingReplication blocks",
        blocks.size(), cluster.getNamesystem().getPendingReplicationBlocks());

      try {

      waitForTempReplica(bl, DN_N1);
                                               
      cluster.getNameNode().blockReport(
        cluster.getDataNodes().get(DN_N1).dnRegistration,
        new BlockListAsLongs(blocks, null).getBlockListAsLongs());
      printStats();
      assertEquals("Wrong number of PendingReplication blocks",
        2, cluster.getNamesystem().getPendingReplicationBlocks());
     
      try {

      }
    }
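    // Trim blockTable to the entries actually filled before wrapping it in a BlockListAsLongs.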
    if (count != blockTable.length) {
      blockTable = Arrays.copyOf(blockTable, count);
    }
    return new BlockListAsLongs(
        new ArrayList<Block>(Arrays.asList(blockTable)), null);
  }

    for (FsVolumeSpi v : volumes.volumes) {
      ArrayList<ReplicaInfo> finalizedList = finalized.get(v.getStorageID());
      ArrayList<ReplicaInfo> ucList = uc.get(v.getStorageID());
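      // One BlockListAsLongs per storage: finalized and under-construction
      // replicas for this volume are reported together.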
      blockReportsMap.put(((FsVolumeImpl) v).toDatanodeStorage(),
                          new BlockListAsLongs(finalizedList, ucList));
    }

    return blockReportsMap;
  }

          new StorageBlockReport[perVolumeBlockLists.size()];

      int i = 0;
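      // Build one StorageBlockReport per storage, encoding each storage's
      // block list in its long[] form for the RPC to the NameNode.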
      for(Map.Entry<DatanodeStorage, BlockListAsLongs> kvPair : perVolumeBlockLists.entrySet()) {
        DatanodeStorage dnStorage = kvPair.getKey();
        BlockListAsLongs blockList = kvPair.getValue();
        totalBlockCount += blockList.getNumberOfBlocks();

        reports[i++] =
            new StorageBlockReport(
              dnStorage, blockList.getBlockListAsLongs());
      }

      cmd = bpNamenode.blockReport(bpRegistration, bpos.getBlockPoolId(), reports);

      // Log the block report processing stats from Datanode perspective
