Class org.apache.hadoop.hdfs.server.namenode.BlocksMap

Examples of org.apache.hadoop.hdfs.server.namenode.BlocksMap.BlockInfo
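
BlockInfo is the BlocksMap's internal representation of a block: it extends Block with a back-pointer to the INodeFile that owns it and with per-datanode links that thread the same object into a doubly linked block list for every replica location. The snippets below are drawn from the namenode's image loading, edit-log replay, block-report processing, and lease-recovery paths. As a minimal sketch of the core lookup (assuming a populated blocksMap, and using only calls that appear in the excerpts below):

    // Minimal sketch, assuming a populated BlocksMap.
    Block key = new Block(blockId);                    // bare lookup key
    BlockInfo stored = blocksMap.getStoredBlock(key);  // null if unknown
    if (stored != null && stored.getINode() != null) {
      // The block is still attached to a file the namenode tracks.
      String path = stored.getINode().getFullPathName();
    }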


    // Rebuild the file's block list: read the block count, then reuse a
    // single Block instance to deserialize each block's fields before
    // wrapping it in a BlockInfo with the file's replication factor.
    int numBlocks = in.readInt();
    BlockInfo[] blocks = new BlockInfo[numBlocks];
    Block blk = new Block();
    for (int i = 0; i < numBlocks; i++) {
      blk.readFields(in);
      blocks[i] = new BlockInfo(blk, blockReplication);
    }
    PermissionStatus perm = PermissionStatus.read(in);
    String clientName = readString(in);
    String clientMachine = readString(in);


    }
    if (imgVersion <= -8) {
      blockSize = in.readLong();
    }
    int numBlocks = in.readInt();
    BlockInfo[] blocks = null;

    // for older versions, a blocklist of size 0
    // indicates a directory.
    if ((-9 <= imgVersion && numBlocks > 0) ||
        (imgVersion < -9 && numBlocks >= 0)) {
      blocks = new BlockInfo[numBlocks];
      for (int j = 0; j < numBlocks; j++) {
        blocks[j] = new BlockInfo(replication);
        if (-14 < imgVersion) {
          blocks[j].set(in.readLong(), in.readLong(),
                        Block.GRANDFATHER_GENERATION_STAMP);
        } else {
          blocks[j].readFields(in);

      DatanodeIndex indexes) {
    // Move 'block' to the head of this datanode's block list. Each
    // BlockInfo carries per-datanode next/previous links, so one object
    // participates in a separate doubly linked list per replica location.
    assert head != null : "Head cannot be null";
    if (head == block) {
      return head;
    }
    // Unlink 'block': point it at the old head and capture its neighbors.
    BlockInfo next = block.getSetNext(indexes.currentIndex, head);
    BlockInfo prev = block.getSetPrevious(indexes.currentIndex, null);

    // Splice the old head in behind 'block' and stitch the neighbors.
    head.setPrevious(indexes.headIndex, block);
    indexes.headIndex = indexes.currentIndex;
    prev.setNext(prev.findDatanode(this), next);
    if (next != null)
      next.setPrevious(next.findDatanode(this), prev);
    return block;
  }
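
The method above moves a block to the head of one datanode's block list so that, during a block report, every reported block ends up in front of the delimiter (see the next snippet). Stripped of the per-datanode index bookkeeping, the pattern is an ordinary doubly-linked-list move-to-head; the sketch below is an illustrative analogue, not HDFS code:

    // Illustrative analogue (not HDFS code): move a node to the front of a
    // doubly linked list.
    static final class Node {
      Node prev, next;
      long blockId;
    }

    static Node moveToHead(Node node, Node head) {
      if (node == head) {
        return head;
      }
      // Unlink the node from its current position.
      if (node.prev != null) node.prev.next = node.next;
      if (node.next != null) node.next.prev = node.prev;
      // Relink it in front of the old head.
      node.prev = null;
      node.next = head;
      head.prev = node;
      return node; // the new head
    }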

                  Collection<Block> toInvalidate,
                  Collection<Block> toRetry,
                  FSNamesystem namesystem) {
    // place a delimiter in the list that separates blocks
    // that have been reported from those that have not
    BlockInfo delimiter = new BlockInfo(new Block(), 1);
    boolean added = this.addBlock(delimiter);
    assert added : "Delimiting block cannot be present in the node";
    // currently the delimiter is the head
    DatanodeIndex indexes = new DatanodeIndex();
    indexes.headIndex = 0;

    if (newReport == null)
      newReport = new BlockListAsLongs(new long[0]);
    // scan the report and collect newly reported blocks
    // Note we are taking special precautions to limit the temporary blocks
    // allocated for this report - which is why the block list is stored as longs
    Block iblk = new Block(); // a fixed new'ed block to be reused with index i
    Block oblk = new Block(); // for fixing genstamps
    for (int i = 0; i < newReport.getNumberOfBlocks(); ++i) {
      iblk.set(newReport.getBlockId(i), newReport.getBlockLen(i),
               newReport.getBlockGenStamp(i));
      BlockInfo storedBlock = blocksMap.getStoredBlock(iblk);
      if (storedBlock == null) {
        // if the block with a WILDCARD generation stamp matches
        // then accept this block.
        // This block has a different generation stamp on the datanode
        // because of a lease-recovery-attempt.
        oblk.set(newReport.getBlockId(i), newReport.getBlockLen(i),
                 GenerationStamp.WILDCARD_STAMP);
        storedBlock = blocksMap.getStoredBlock(oblk);
        if (storedBlock != null && storedBlock.getINode() != null &&
            (storedBlock.getGenerationStamp() <= iblk.getGenerationStamp() ||
             storedBlock.getINode().isUnderConstruction())) {
          // accept block. It will be cleaned up on cluster restart.
        } else {
          storedBlock = null;
        }
      }
      if (storedBlock == null) {
        // If block is not in blocksMap it does not belong to any file
        if (namesystem.getNameNode().shouldRetryAbsentBlock(iblk)) {
          toRetry.add(new Block(iblk));
        } else {
          toInvalidate.add(new Block(iblk));
        }
        continue;
      }
      int index = storedBlock.findDatanode(this);
      if (index < 0) { // Known block, but not on the DN
        // if the size differs from what is in the blockmap, then return
        // the new block. addStoredBlock will then pick up the right size of this
        // block and will update the block object in the BlocksMap
        if (storedBlock.getNumBytes() != iblk.getNumBytes()) {
          toAdd.add(new Block(iblk));
        } else {
          toAdd.add(storedBlock);
        }
        continue;
      }
      indexes.currentIndex = index;
      // move block to the head of the list
      blockList = listMoveToHead(storedBlock, blockList, indexes);
    }
    // collect blocks that have not been reported
    // all of them are next to the delimiter
    Iterator<Block> it = new BlockIterator(delimiter.getNext(0), this);
    while (it.hasNext()) {
      BlockInfo storedBlock = (BlockInfo)it.next();
      INodeFile file = storedBlock.getINode();
      if (file == null || !file.isUnderConstruction()) {
        toRemove.add(storedBlock);
      }
    }
    this.removeBlock(delimiter);
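
The delimiter trick above is the heart of the diff: a dummy BlockInfo is inserted at the head, every block named in the report is moved in front of it, and whatever still sits behind the delimiter afterwards was not reported and becomes a removal candidate. The same idea on a java.util.LinkedList, as an illustrative analogue rather than HDFS code:

    import java.util.ArrayList;
    import java.util.LinkedList;
    import java.util.List;

    // Illustrative analogue (not HDFS code): find stored ids that the
    // report failed to mention, via the delimiter + move-to-head trick.
    static List<Long> unreported(LinkedList<Long> stored, List<Long> report) {
      final Long DELIMITER = Long.MIN_VALUE; // sentinel, never a real id
      stored.addFirst(DELIMITER);
      for (Long id : report) {
        if (stored.remove(id)) {   // remove(Object): drop it wherever it is
          stored.addFirst(id);     // ...and re-add it at the head
        }
      }
      // Everything still behind the delimiter was never reported.
      int d = stored.indexOf(DELIMITER);
      List<Long> missing = new ArrayList<>(stored.subList(d + 1, stored.size()));
      stored.remove(DELIMITER);
      return missing;
    }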

    public boolean hasNext() {
      return current != null;
    }

    public BlockInfo next() {
      if (current == null) {
        // Honor the Iterator contract instead of failing with an NPE.
        throw new NoSuchElementException();
      }
      BlockInfo res = current;
      // Advance along this datanode's chain inside the BlockInfo links.
      current = current.getNext(current.findDatanode(node));
      return res;
    }
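
This iterator follows the per-datanode next-pointers inside each BlockInfo, so it walks exactly the chain that listMoveToHead maintains. A hypothetical use (the getBlockIterator accessor is assumed for illustration):

    // Hypothetical usage: walk every block currently associated with one
    // datanode descriptor. getBlockIterator() is an assumed accessor.
    Iterator<Block> it = dn.getBlockIterator();
    while (it.hasNext()) {
      Block b = it.next();
      System.out.println(b.getBlockName() + " bytes=" + b.getNumBytes());
    }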

      // versions > 0 support per-file replication
      // get name and replication
      final short replication = fsNamesys.adjustReplication(addCloseOp.replication);

      long blockSize = addCloseOp.blockSize;
      BlockInfo[] blocks = new BlockInfo[addCloseOp.blocks.length];
      for (int i = 0; i < addCloseOp.blocks.length; i++) {
        blocks[i] = new BlockInfo(addCloseOp.blocks[i], replication);
      }

      // Older versions of HDFS do not store the block size in the inode.
      // If the file has more than one block, use the size of the
      // first block as the blocksize. Otherwise use the default
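
The trailing comment is cut off by the excerpt; a hedged reconstruction of the fallback it describes (variable and method names assumed) might look like:

      // Hedged reconstruction of the described fallback (names assumed):
      // old images did not persist a blocksize, so infer one.
      if (blockSize == 0 && blocks.length > 0) {
        if (blocks.length > 1) {
          blockSize = blocks[0].getNumBytes();   // first-block heuristic
        } else {
          // A lone block may be short, so never fall below the default.
          blockSize = Math.max(fsNamesys.getDefaultBlockSize(),
                               blocks[0].getNumBytes());
        }
      }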

  @Override
  public LocatedBlockWithFileName getBlockInfo(long blockId)
      throws IOException {
    // Resolve a bare block id to its stored BlockInfo, and from there to
    // the file that owns it; either lookup can miss for a stale block.
    Block block = new Block(blockId);
    BlockInfo blockInfo = namesystem.blocksMap.getBlockInfo(block);
    if (null == blockInfo) {
      return null;
    }

    INodeFile inode = blockInfo.getINode();
    if (null == inode) {
      return null;
    }

    String fileName = inode.getFullPathName();
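
A hypothetical caller of this RPC (the getFileName accessor is assumed for illustration):

    // Hypothetical usage: map a block id reported by an external tool back
    // to the path that owns it. getFileName() is assumed here.
    LocatedBlockWithFileName located = namenode.getBlockInfo(blockId);
    if (located != null) {
      System.out.println("block " + blockId + " -> " + located.getFileName());
    }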

      INodeFileUnderConstruction file = checkLease(src, holder);

      Block[] blocks = file.getBlocks();
      if (blocks != null && blocks.length > 0) {
        Block last = blocks[blocks.length - 1];
        BlockInfo storedBlock = blocksMap.getStoredBlock(last);
        if (file.getPreferredBlockSize() > storedBlock.getNumBytes()) {
          long fileLength = file.computeContentSummary().getLength();         
          DatanodeDescriptor[] targets = new DatanodeDescriptor[blocksMap.numNodes(last)];
          Iterator<DatanodeDescriptor> it = blocksMap.nodeIterator(last);
          for (int i = 0; it != null && it.hasNext(); i++) {
            targets[i] = it.next();
          }
          // remove the replica locations of this block from the blocksMap
          for (int i = 0; i < targets.length; i++) {
            targets[i].removeBlock(storedBlock);
          }
          // set the locations of the last block in the lease record
          file.setLastBlock(storedBlock, targets);

          lb = createLocatedBlock(last, targets, fileLength - storedBlock.getNumBytes(),
              DataTransferProtocol.DATA_TRANSFER_VERSION, type);

          // Remove block from replication queue.
          neededReplications.remove(last, -1);

   * @param file the file's inode
   */
  private void replicateLastBlock(String src, INodeFileUnderConstruction file) {
    if (file.blocks == null || file.blocks.length == 0)
      return;
    BlockInfo block = file.blocks[file.blocks.length-1];
    DatanodeDescriptor[] targets = file.getTargets();
    final int numOfTargets = targets == null ? 0 : targets.length;
    NumberReplicas status = countNodes(block);
    int totalReplicas = status.getTotal();
    if (numOfTargets > totalReplicas) {

        throw new IOException("Cannot mark block " + blk.getBlockName() +
          " as corrupt because datanode " + dn.getName() +
          " does not exist.");
      }

      final BlockInfo storedBlockInfo = blocksMap.getStoredBlock(blk);
      if (storedBlockInfo == null) {
        // Check if the replica is in the blocksMap; if not, ignore the
        // request for now. This can happen when a Datanode's BlockScanner
        // thread reports a bad block before the Datanode has sent its
        // block reports on startup.
        NameNode.stateChangeLog.info("BLOCK NameSystem.markBlockAsCorrupt: " +
          "block " + blk + " could not be marked " +
          "as corrupt as it does not exist in the " +
          "blocksMap");
      } else {
        INodeFile inode = storedBlockInfo.getINode();
        if (inode == null) {
          NameNode.stateChangeLog.info("BLOCK NameSystem.markBlockAsCorrupt: " +
            "block " + blk + " could not be marked " +
            "as corrupt as it does not belong to " +
            "any file");
