Package org.apache.hadoop.hdfs.server.namenode

Examples of org.apache.hadoop.hdfs.server.namenode.INode
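
The fragments below are drawn from the HDFS NameNode sources and show typical uses of INode: snapshot-diff bookkeeping, FSImage (de)serialization of created/deleted child lists and inode references, rename-path resolution for snapshot diff reports, and block-replication accounting.

The first fragment replaces a child entry in a name-sorted child list. It searches by local name and then verifies the inode id before swapping in the new child, so the caller cannot clobber an unrelated entry that merely reuses the same name: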


      // Locate oldChild in the name-sorted child list; a negative index
      // means the name is absent. Comparing inode ids guards against
      // replacing a different inode that happens to share the local name.
      final int i = search(list, oldChild.getLocalNameBytes());
      if (i < 0 || list.get(i).getId() != oldChild.getId()) {
        return false;
      }

      final INode removed = list.set(i, newChild);
      Preconditions.checkState(removed == oldChild);
      return true;
    }
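
The same search-then-verify-identity pattern is easy to reproduce outside HDFS. Below is a minimal, self-contained sketch; the Entry record and all names are hypothetical stand-ins, not part of the HDFS API:

import java.util.*;

public class ReplaceByIdDemo {
  // Hypothetical stand-in for an INode: a sort key plus a stable id.
  record Entry(String name, long id) {}

  // Replace oldEntry with newEntry; return false if it is no longer present.
  static boolean replace(List<Entry> sorted, Entry oldEntry, Entry newEntry) {
    // Binary search by name, mirroring search(list, localNameBytes) above.
    int i = Collections.binarySearch(sorted, oldEntry,
        Comparator.comparing(Entry::name));
    if (i < 0 || sorted.get(i).id() != oldEntry.id()) {
      return false; // name missing, or a different entry now owns that name
    }
    Entry removed = sorted.set(i, newEntry);
    assert removed == oldEntry;
    return true;
  }

  public static void main(String[] args) {
    Entry a = new Entry("a", 1), b = new Entry("b", 2);
    List<Entry> list = new ArrayList<>(List.of(a, b));
    System.out.println(replace(list, b, new Entry("b", 3))); // true
    System.out.println(replace(list, b, new Entry("b", 4))); // false: already replaced
  }
}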
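
The next helper resolves an entry of a snapshot diff's created list while an FSImage is being loaded. A created inode must either appear in the deleted list of a later snapshot diff or still be one of the directory's current children; anything else means the image is corrupt: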


  public static INode loadCreated(byte[] createdNodeName,
      INodeDirectory parent) throws IOException {
    // the INode in the created list should be a reference to another INode
    // in posterior SnapshotDiffs or one of the current children
    for (DirectoryDiff postDiff : parent.getDiffs()) {
      final INode d = postDiff.getChildrenDiff().search(ListType.DELETED,
          createdNodeName);
      if (d != null) {
        return d;
      } // else go to the next SnapshotDiff
    }
    // use the current child
    INode currentChild = parent.getChild(createdNodeName,
        Snapshot.CURRENT_STATE_ID);
    if (currentChild == null) {
      throw new IOException("Cannot find an INode associated with the INode "
          + DFSUtil.bytes2String(createdNodeName)
          + " in created list while loading FSImage.");
    }
    return currentChild;
  }
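
The created list itself is stored as a length-prefixed sequence of local names; each name is resolved back to an INode with loadCreated above. The method signature, cut off in the original excerpt, is restored here from the surrounding HDFS source: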

  private static List<INode> loadCreatedList(INodeDirectory parent,
      DataInput in) throws IOException {
    // read the size of the created list
    int createdSize = in.readInt();
    List<INode> createdList = new ArrayList<INode>(createdSize);
    for (int i = 0; i < createdSize; i++) {
      byte[] createdNodeName = FSImageSerialization.readLocalName(in);
      INode created = loadCreated(createdNodeName, parent);
      createdList.add(created);
    }
    return createdList;
  }
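
The deleted list is loaded the same way, except that full inodes rather than names are deserialized. Each deleted inode gets its parent pointer set for consistency with the live directory tree, and blocks of deleted files are re-registered in the blocks map. Again, the first signature line is restored from the surrounding source: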

  private static List<INode> loadDeletedList(INodeDirectory parent,
      List<INode> createdList, DataInput in, FSImageFormat.Loader loader)
      throws IOException {
    int deletedSize = in.readInt();
    List<INode> deletedList = new ArrayList<INode>(deletedSize);
    for (int i = 0; i < deletedSize; i++) {
      final INode deleted = loader.loadINodeWithLocalName(true, in, true);
      deletedList.add(deleted);
      // set parent: the parent field of an INode in the deleted list is not
      // useful, but set the parent here to be consistent with the original
      // fsdir tree.
      deleted.setParent(parent);
      if (deleted.isFile()) {
        loader.updateBlocksMap(deleted.asFile());
      }
    }
    return deletedList;
  }
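
Both loaders follow the same wire pattern: an int count followed by exactly that many serialized entries. A minimal, self-contained sketch of the pattern (the class and the use of writeUTF/readUTF are illustrative only, not the HDFS encoding):

import java.io.*;
import java.util.*;

public class LengthPrefixedListDemo {
  // Write an int count, then each entry.
  static void writeNames(DataOutput out, List<String> names) throws IOException {
    out.writeInt(names.size());
    for (String name : names) {
      out.writeUTF(name);
    }
  }

  // Read the count first, then exactly that many entries.
  static List<String> readNames(DataInput in) throws IOException {
    int n = in.readInt();
    List<String> names = new ArrayList<>(n);
    for (int i = 0; i < n; i++) {
      names.add(in.readUTF());
    }
    return names;
  }

  public static void main(String[] args) throws IOException {
    ByteArrayOutputStream buf = new ByteArrayOutputStream();
    writeNames(new DataOutputStream(buf), List.of("created", "deleted"));
    DataInput in = new DataInputStream(new ByteArrayInputStream(buf.toByteArray()));
    System.out.println(readNames(in)); // [created, deleted]
  }
}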
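
An INodeReference.WithCount can be referenced from several places in an image, so the serializer deduplicates: the first occurrence writes the full inode, later occurrences write only its reference id. The dirMap field belongs to the enclosing ReferenceMap class; the tail of the method is elided in the excerpt: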

    private final Map<Long, Long> dirMap = new HashMap<Long, Long>();

    public void writeINodeReferenceWithCount(
        INodeReference.WithCount withCount, DataOutput out,
        boolean writeUnderConstruction) throws IOException {
      final INode referred = withCount.getReferredINode();
      final long id = withCount.getId();
      final boolean firstReferred = !referenceMap.containsKey(id);
      out.writeBoolean(firstReferred);

      if (firstReferred) {
        // first occurrence: serialize the referred inode in full and record
        // it under its id; otherwise only the id is written
        // (remainder of the method elided in the original excerpt).
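
The matching loader mirrors that scheme: a boolean flag tells it whether a full inode follows or just the id of a previously registered reference. The signature and the last lines, truncated in the excerpt, are restored from the surrounding source: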

    public INodeReference.WithCount loadINodeReferenceWithCount(
        boolean isSnapshotINode, DataInput in, FSImageFormat.Loader loader
        ) throws IOException {
      final boolean firstReferred = in.readBoolean();

      final INodeReference.WithCount withCount;
      if (firstReferred) {
        final INode referred = loader.loadINodeWithLocalName(isSnapshotINode,
            in, true);
        withCount = new INodeReference.WithCount(null, referred);
        referenceMap.put(withCount.getId(), withCount);
      } else {
        final long id = in.readLong();
        withCount = referenceMap.get(id);
      }
      return withCount;
    }
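
This write/read pair is an instance of a common deduplication trick for object graphs: flag each shared object's first appearance, serialize it once, and replay later appearances by id. A minimal, self-contained sketch with hypothetical names (HDFS derives the id from the inode instead of writing it on the first pass):

import java.io.*;
import java.util.*;

public class SharedRefDemo {
  private final Map<Long, String> seen = new HashMap<>();

  // First occurrence: marker true + id + payload; afterwards: marker false + id.
  void write(DataOutput out, long id, String payload) throws IOException {
    boolean first = !seen.containsKey(id);
    out.writeBoolean(first);
    out.writeLong(id);
    if (first) {
      out.writeUTF(payload);
      seen.put(id, payload);
    }
  }

  String read(DataInput in) throws IOException {
    boolean first = in.readBoolean();
    long id = in.readLong();
    if (first) {
      String payload = in.readUTF();
      seen.put(id, payload);
      return payload;
    }
    return seen.get(id); // must have been registered earlier in the stream
  }

  public static void main(String[] args) throws IOException {
    SharedRefDemo writer = new SharedRefDemo(), reader = new SharedRefDemo();
    ByteArrayOutputStream buf = new ByteArrayOutputStream();
    DataOutputStream out = new DataOutputStream(buf);
    writer.write(out, 7L, "inode-7");
    writer.write(out, 7L, "inode-7"); // second write emits only the id
    DataInput in = new DataInputStream(new ByteArrayInputStream(buf.toByteArray()));
    System.out.println(reader.read(in) + " / " + reader.read(in)); // inode-7 / inode-7
  }
}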
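
When a snapshot diff report encounters a WithName reference (a file or directory that was renamed away), it must decide whether the rename target still lives under the same snapshottable directory. This routine walks parent pointers from the referred inode back toward the snapshottable root, collecting path components on the way; it returns null when the target has moved out of scope. The excerpt ends mid-loop: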

   * However, we should include it in our snapshot diff report as rename only
   * if the rename target is also under the same snapshottable directory.
   */
  private byte[][] findRenameTargetPath(INodeReference.WithName wn,
      final int snapshotId) {
    INode inode = wn.getReferredINode();
    final LinkedList<byte[]> ancestors = Lists.newLinkedList();
    while (inode != null) {
      if (inode == this) {
        return ancestors.toArray(new byte[ancestors.size()][]);
      }
      if (inode instanceof INodeReference.WithCount) {
        inode = ((WithCount) inode).getParentRef(snapshotId);
      } else {
        INode parent = inode.getParentReference() != null ? inode
            .getParentReference() : inode.getParent();
        if (parent != null && parent instanceof INodeDirectory) {
          int sid = parent.asDirectory().searchChild(inode);
          if (sid < snapshotId) {
            return null;
          }
        }
        if (!(parent instanceof WithCount)) {
          // collect this path component and continue walking upward
          // (remainder of the method elided in the original excerpt).
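
The remaining fragments come from block management. In the first, a block has just been removed (for example because a datanode failed); the blocks map is consulted, and only blocks still attached to a file have their safe-block count decremented and their replication needs re-evaluated: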

      // It's possible that the block was removed because of a datanode
      // failure. If the block is still valid, check if replication is
      // necessary. In that case, put block on a possibly-will-
      // be-replicated list.
      //
      INode fileINode = blocksMap.getINode(block);
      if (fileINode != null) {
        namesystem.decrementSafeBlockCount(block);
        updateNeededReplications(block, -1, 0);
      }
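
When a decommissioning datanode holds an under-replicated block, the following diagnostic logs the replica counts and the datanodes involved. Note that fileINode is dereferenced without a null check, so callers must only pass blocks that still belong to a file: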

  private void logBlockReplicationInfo(Block block, DatanodeDescriptor srcNode,
      NumberReplicas num) {
    int curReplicas = num.liveReplicas();
    int curExpectedReplicas = getReplication(block);
    INode fileINode = blocksMap.getINode(block); // assumed non-null by callers
    Iterator<DatanodeDescriptor> nodeIter = blocksMap.nodeIterator(block);
    StringBuilder nodeList = new StringBuilder();
    while (nodeIter.hasNext()) {
      DatanodeDescriptor node = nodeIter.next();
      nodeList.append(node.name);
      nodeList.append(" ");
    }
    LOG.info("Block: " + block + ", Expected Replicas: "
        + curExpectedReplicas + ", live replicas: " + curReplicas
        + ", corrupt replicas: " + num.corruptReplicas()
        + ", decommissioned replicas: " + num.decommissionedReplicas()
        + ", excess replicas: " + num.excessReplicas()
        + ", Is Open File: " + fileINode.isUnderConstruction()
        + ", Datanodes having this block: " + nodeList + ", Current Datanode: "
        + srcNode.name + ", Is current datanode decommissioning: "
        + srcNode.isDecommissionInProgress());
  }
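
Finally, this loop belongs to the check that decides whether a decommissioning datanode may be retired. It scans every block on the node and tallies under-replicated blocks, blocks whose only remaining replicas sit on decommissioned nodes, and under-replicated blocks that belong to open files; blocks that are neither queued nor pending are re-queued for replication. The excerpt ends just after the re-queueing check begins: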

    int decommissionOnlyReplicas = 0;
    int underReplicatedInOpenFiles = 0;
    final Iterator<? extends Block> it = srcNode.getBlockIterator();
    while(it.hasNext()) {
      final Block block = it.next();
      INode fileINode = blocksMap.getINode(block);

      if (fileINode != null) {
        NumberReplicas num = countNodes(block);
        int curReplicas = num.liveReplicas();
        int curExpectedReplicas = getReplication(block);
        if (isNeededReplication(block, curExpectedReplicas, curReplicas)) {
          if (curExpectedReplicas > curReplicas) {
            //Log info about one block for this node which needs replication
            if (!status) {
              status = true;
              logBlockReplicationInfo(block, srcNode, num);
            }
            underReplicatedBlocks++;
            if ((curReplicas == 0) && (num.decommissionedReplicas() > 0)) {
              decommissionOnlyReplicas++;
            }
            if (fileINode.isUnderConstruction()) {
              underReplicatedInOpenFiles++;
            }
          }
          if (!neededReplications.contains(block) &&
              pendingReplications.getNumReplicas(block) == 0) {
            // not queued and not in flight: re-queue for replication
            // (remainder of the method elided in the original excerpt).
