Package org.apache.hadoop.hdfs.server.namenode

Examples of org.apache.hadoop.hdfs.server.namenode.INodeFile$HeaderFormat


                              DatanodeDescriptor addedNode,
                              DatanodeDescriptor delNodeHint,
                              BlockPlacementPolicy replicator) {
    assert namesystem.hasWriteLock();
    // first, build a map from rack name to the datanodes that hold replicas on that rack
    INodeFile inode = getINode(b);
    final Map<String, List<DatanodeDescriptor>> rackMap
        = new HashMap<String, List<DatanodeDescriptor>>();
    for(final Iterator<DatanodeDescriptor> iter = nonExcess.iterator();
        iter.hasNext(); ) {
      final DatanodeDescriptor node = iter.next();
      // ... remainder of method elided
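The loop above walks the non-excess replicas and groups them by rack before deciding which replica to drop. Below is a minimal, self-contained sketch of that grouping step, with plain strings standing in for DatanodeDescriptor (the node and rack names are made up):

import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

public class RackMapSketch {
  public static void main(String[] args) {
    // hypothetical (node, rack) pairs standing in for DatanodeDescriptors
    String[][] nodes = {
        {"dn1", "/rack-a"}, {"dn2", "/rack-a"}, {"dn3", "/rack-b"}
    };
    // group the nodes by rack, as the loop above does for nonExcess replicas
    Map<String, List<String>> rackMap = new HashMap<String, List<String>>();
    for (String[] n : nodes) {
      List<String> onRack = rackMap.get(n[1]);
      if (onRack == null) {
        onRack = new ArrayList<String>();
        rackMap.put(n[1], onRack);
      }
      onRack.add(n[0]);
    }
    // e.g. {/rack-a=[dn1, dn2], /rack-b=[dn3]} (iteration order not guaranteed)
    System.out.println(rackMap);
  }
}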


      final DatanodeDescriptor srcNode) {
    final Iterator<? extends Block> it = srcNode.getBlockIterator();
    int numOverReplicated = 0;
    while(it.hasNext()) {
      final Block block = it.next();
      INodeFile fileINode = blocksMap.getINode(block);
      short expectedReplication = fileINode.getReplication();
      NumberReplicas num = countNodes(block);
      int numCurrentReplica = num.liveReplicas();
      if (numCurrentReplica > expectedReplication) {
        // over-replicated block
        processOverReplicatedBlock(block, expectedReplication, null, null);
        // ... remainder of method elided

    }
  }

  /** Get the replication factor of a block, or 0 if the block belongs to no file. */
  private int getReplication(Block block) {
    INodeFile fileINode = blocksMap.getINode(block);
    if (fileINode == null) { // block does not belong to any file
      return 0;
    }
    assert !fileINode.isDirectory() : "Block cannot belong to a directory.";
    return fileINode.getReplication();
  }
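Both snippets above rely on the same guard: a block whose owning INodeFile cannot be found belongs to no file and is treated as replication 0, so callers skip it. Here is a minimal sketch of that convention, with a plain map standing in for blocksMap (all names hypothetical):

import java.util.HashMap;
import java.util.Map;

public class ReplicationLookupSketch {
  // hypothetical stand-in for blocksMap: block id -> expected replication
  static final Map<Long, Short> blockToReplication = new HashMap<Long, Short>();

  static int getReplication(long blockId) {
    Short expected = blockToReplication.get(blockId);
    // a block with no owning file is treated as replication 0
    return expected == null ? 0 : expected;
  }

  public static void main(String[] args) {
    blockToReplication.put(1L, (short) 3);
    System.out.println(getReplication(1L)); // 3
    System.out.println(getReplication(2L)); // 0: belongs to no file
  }
}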

        assert containingLiveReplicasNodes.size() == numReplicas.liveReplicas();
        int usableReplicas = numReplicas.liveReplicas() +
                             numReplicas.decommissionedReplicas();
      
        if (block instanceof BlockInfo) {
          INodeFile inodeFile = ((BlockInfo) block).getINode();
          String fileName = (inodeFile == null) ? "[orphaned]"
            : inodeFile.getFullPathName();
          out.print(fileName + ": ");
        }
        // l: == live, d: == decommissioned, c: == corrupt, e: == excess
        out.print(block + ((usableReplicas > 0)? "" : " MISSING") +
                  " (replicas:" +
        // ... remainder of method elided
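The dump above counts a replica as usable whether it is live or on a decommissioned node, and flags a block with no usable replica as MISSING. A small sketch of that report line, reusing the l:/d: keys from the comment (the class and method names are hypothetical):

public class BlockReportLineSketch {
  static String reportLine(String block, int live, int decommissioned) {
    // a replica is usable whether live or on a decommissioned node
    int usable = live + decommissioned;
    return block + (usable > 0 ? "" : " MISSING")
        + " (l: " + live + " d: " + decommissioned + ")";
  }

  public static void main(String[] args) {
    System.out.println(reportLine("blk_1", 2, 1)); // blk_1 (l: 2 d: 1)
    System.out.println(reportLine("blk_2", 0, 0)); // blk_2 MISSING (l: 0 d: 0)
  }
}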

                            storedBlock.getBlockName() +
                            " as corrupt because datanode " + dn.getName() +
                            " does not exist. ");
    }

    INodeFile inode = storedBlock.getINode();
    if (inode == null) {
      blockLog.info("BLOCK markBlockAsCorrupt: " +
                                   "block " + storedBlock +
                                   " could not be marked as corrupt as it" +
                                   " does not belong to any file");
      addToInvalidates(storedBlock, node);
      return;
    }

    // Add replica to the data-node if it is not already there
    node.addBlock(storedBlock);

    // Add this replica to corruptReplicas Map
    corruptReplicas.addToCorruptReplicasMap(storedBlock, node, reason);
    if (countNodes(storedBlock).liveReplicas() >= inode.getReplication()) {
      // the block is over-replicated so invalidate the replicas immediately
      invalidateBlock(storedBlock, node);
    } else if (namesystem.isPopulatingReplQueues()) {
      // add the block to neededReplication
      updateNeededReplications(storedBlock, -1, 0);
      // ... remainder of method elided
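markBlockAsCorrupt ends with a choice: if enough live replicas remain, the corrupt replica can be invalidated immediately; otherwise the block must be queued for re-replication first so data is not lost. A self-contained sketch of just that decision (all names hypothetical):

public class CorruptReplicaPolicySketch {
  enum Action { INVALIDATE_NOW, QUEUE_FOR_REPLICATION }

  static Action handleCorruptReplica(int liveReplicas, int expectedReplication) {
    if (liveReplicas >= expectedReplication) {
      return Action.INVALIDATE_NOW;       // enough good copies: delete right away
    }
    return Action.QUEUE_FOR_REPLICATION;  // re-replicate before deleting
  }

  public static void main(String[] args) {
    System.out.println(handleCorruptReplica(3, 3)); // INVALIDATE_NOW
    System.out.println(handleCorruptReplica(1, 3)); // QUEUE_FOR_REPLICATION
  }
}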

  @VisibleForTesting
  int computeReplicationWorkForBlocks(List<List<Block>> blocksToReplicate) {
    int requiredReplication, numEffectiveReplicas;
    List<DatanodeDescriptor> containingNodes, liveReplicaNodes;
    DatanodeDescriptor srcNode;
    INodeFile fileINode = null;
    int additionalReplRequired;

    int scheduledWork = 0;
    List<ReplicationWork> work = new LinkedList<ReplicationWork>();

    namesystem.writeLock();
    try {
      synchronized (neededReplications) {
        for (int priority = 0; priority < blocksToReplicate.size(); priority++) {
          for (Block block : blocksToReplicate.get(priority)) {
            // block should belong to a file
            fileINode = blocksMap.getINode(block);
            // abandoned block or block reopened for append
            if(fileINode == null || fileINode.isUnderConstruction()) {
              neededReplications.remove(block, priority); // remove from neededReplications
              neededReplications.decrementReplicationIndex(priority);
              continue;
            }

            requiredReplication = fileINode.getReplication();

            // get a source data-node
            containingNodes = new ArrayList<DatanodeDescriptor>();
            liveReplicaNodes = new ArrayList<DatanodeDescriptor>();
            NumberReplicas numReplicas = new NumberReplicas();
            srcNode = chooseSourceDatanode(
                block, containingNodes, liveReplicaNodes, numReplicas, priority);
            if(srcNode == null) // block cannot be replicated from any node
              continue;

            assert liveReplicaNodes.size() == numReplicas.liveReplicas();
            // do not schedule more if enough replicas are already pending
            numEffectiveReplicas = numReplicas.liveReplicas() +
                                    pendingReplications.getNumReplicas(block);
     
            if (numEffectiveReplicas >= requiredReplication) {
              if ( (pendingReplications.getNumReplicas(block) > 0) ||
                   (blockHasEnoughRacks(block)) ) {
                neededReplications.remove(block, priority); // remove from neededReplications
                neededReplications.decrementReplicationIndex(priority);
                blockLog.info("BLOCK* "
                    + "Removing block " + block
                    + " from neededReplications as it has enough replicas.");
                continue;
              }
            }

            if (numReplicas.liveReplicas() < requiredReplication) {
              additionalReplRequired = requiredReplication
                  - numEffectiveReplicas;
            } else {
              additionalReplRequired = 1; // Needed on a new rack
            }
            work.add(new ReplicationWork(block, fileINode, srcNode,
                containingNodes, liveReplicaNodes, additionalReplRequired,
                priority));
          }
        }
      }
    } finally {
      namesystem.writeUnlock();
    }

    HashMap<Node, Node> excludedNodes
        = new HashMap<Node, Node>();
    for(ReplicationWork rw : work){
      // Exclude all of the containing nodes from being targets.
      // This list includes decommissioning or corrupt nodes.
      excludedNodes.clear();
      for (DatanodeDescriptor dn : rw.containingNodes) {
        excludedNodes.put(dn, dn);
      }

      // choose replication targets: NOT HOLDING THE GLOBAL LOCK
      // It is costly to extract the filename for which chooseTargets is called,
      // so for now we pass in the Inode itself.
      rw.targets = blockplacement.chooseTarget(rw.fileINode,
          rw.additionalReplRequired, rw.srcNode, rw.liveReplicaNodes,
          excludedNodes, rw.block.getNumBytes());
    }

    namesystem.writeLock();
    try {
      for(ReplicationWork rw : work){
        DatanodeDescriptor[] targets = rw.targets;
        if(targets == null || targets.length == 0){
          rw.targets = null;
          continue;
        }

        synchronized (neededReplications) {
          Block block = rw.block;
          int priority = rw.priority;
          // Recheck since global lock was released
          // block should belong to a file
          fileINode = blocksMap.getINode(block);
          // abandoned block or block reopened for append
          if(fileINode == null || fileINode.isUnderConstruction()) {
            neededReplications.remove(block, priority); // remove from neededReplications
            rw.targets = null;
            neededReplications.decrementReplicationIndex(priority);
            continue;
          }
          requiredReplication = fileINode.getReplication();

          // do not schedule more if enough replicas are already pending
          NumberReplicas numReplicas = countNodes(block);
          numEffectiveReplicas = numReplicas.liveReplicas() +
            pendingReplications.getNumReplicas(block);
          // ... remainder of method elided
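The method above is built around a deliberate locking pattern: gather candidates under the namesystem write lock, release the lock for the expensive chooseTarget call, then reacquire it and recheck every candidate before committing, because the namespace may have changed in between. A minimal sketch of that three-phase shape, with hypothetical names and a plain ReentrantReadWriteLock standing in for the namesystem lock:

import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.locks.ReentrantReadWriteLock;

public class LockReleasePatternSketch {
  private final ReentrantReadWriteLock lock = new ReentrantReadWriteLock();
  private final List<String> pendingBlocks = new ArrayList<String>();

  void scheduleWork() {
    // phase 1: snapshot the candidates under the write lock
    List<String> work;
    lock.writeLock().lock();
    try {
      work = new ArrayList<String>(pendingBlocks);
    } finally {
      lock.writeLock().unlock();
    }

    // phase 2: expensive computation (like chooseTarget) with the lock released
    List<String> targets = new ArrayList<String>();
    for (String block : work) {
      targets.add("target-for-" + block);
    }

    // phase 3: reacquire the lock and recheck each candidate, since the
    // state may have changed while the lock was released
    lock.writeLock().lock();
    try {
      for (int i = 0; i < work.size(); i++) {
        if (pendingBlocks.contains(work.get(i))) { // recheck before committing
          System.out.println(work.get(i) + " -> " + targets.get(i));
        }
      }
    } finally {
      lock.writeLock().unlock();
    }
  }

  public static void main(String[] args) {
    LockReleasePatternSketch s = new LockReleasePatternSketch();
    s.pendingBlocks.add("blk_1");
    s.pendingBlocks.add("blk_2");
    s.scheduleWork();
  }
}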

  }

  private Collection<LocatedBlock> getCompanionBlocks(
      FSNamesystem namesystem, BlockPlacementPolicyRaid policy,
      ExtendedBlock block) throws IOException {
    INodeFile inode = (INodeFile)blockManager.blocksMap.getBlockCollection(block
        .getLocalBlock());
    FileType type = policy.getFileType(inode.getFullPathName());
    return policy.getCompanionBlocks(inode.getFullPathName(), type,
        block.getLocalBlock());
  }

    }
   
    // check if the inode of the file is under construction
    @Override
    boolean checkNamenodeBeforeReturn() throws Exception {
      INodeFile fileNode = cluster.getNameNode(0).getNamesystem()
          .getFSDirectory().getINode4Write(fileName).asFile();
      boolean fileIsUC = fileNode.isUnderConstruction();
      for (int i = 0; i < CHECKTIMES && !fileIsUC; i++) {
        Thread.sleep(1000);
        fileNode = cluster.getNameNode(0).getNamesystem().getFSDirectory()
            .getINode4Write(fileName).asFile();
        fileIsUC = fileNode.isUnderConstruction();
      }
      return fileIsUC;
    }
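checkNamenodeBeforeReturn uses a simple poll-and-sleep idiom: evaluate the condition once, then retry up to CHECKTIMES with a one-second sleep while it does not hold. A generic sketch of that idiom (the Condition interface and CHECKTIMES value here are hypothetical stand-ins):

public class PollUntilSketch {
  static final int CHECKTIMES = 5; // stand-in for the test's CHECKTIMES constant

  interface Condition {
    boolean check() throws Exception;
  }

  static boolean waitFor(Condition condition) throws Exception {
    boolean ok = condition.check();
    for (int i = 0; i < CHECKTIMES && !ok; i++) {
      Thread.sleep(1000); // give the namenode time to reach the expected state
      ok = condition.check();
    }
    return ok;
  }

  public static void main(String[] args) throws Exception {
    final long deadline = System.currentTimeMillis() + 1500;
    boolean reached = waitFor(new Condition() {
      public boolean check() {
        return System.currentTimeMillis() > deadline;
      }
    });
    System.out.println(reached); // true after roughly two checks
  }
}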

    // after creating snapshot s0, create a directory tempdir under dir and then
    // delete tempdir immediately
    Path tempDir = new Path(dir, "tempdir");
    Path tempFile = new Path(tempDir, "tempfile");
    DFSTestUtil.createFile(hdfs, tempFile, BLOCKSIZE, REPLICATION, seed);
    final INodeFile temp = TestSnapshotBlocksMap.assertBlockCollection(
        tempFile.toString(), 1, fsdir, blockmanager);
    BlockInfo[] blocks = temp.getBlocks();
    hdfs.delete(tempDir, true);
    // check dir's quota usage
    checkQuotaUsageComputation(dir, 9L, BLOCKSIZE * REPLICATION * 3);
    // check blocks of tempFile
    for (BlockInfo b : blocks) {
      assertNull(blockmanager.getBlockCollection(b));
    }
   
    // make a change: create a new file under subsub
    Path newFileAfterS0 = new Path(subsub, "newFile");
    DFSTestUtil.createFile(hdfs, newFileAfterS0, BLOCKSIZE, REPLICATION, seed);
    // further change: change the replication factor of metaChangeFile
    hdfs.setReplication(metaChangeFile1, REPLICATION_1);
    hdfs.setReplication(metaChangeFile2, REPLICATION_1);
   
    // create snapshot s1
    SnapshotTestHelper.createSnapshot(hdfs, dir, "s1");
    // check dir's quota usage
    checkQuotaUsageComputation(dir, 14L, BLOCKSIZE * REPLICATION * 4);
   
    // get two snapshots for later use
    Snapshot snapshot0 = ((INodeDirectorySnapshottable) fsdir.getINode(dir
        .toString())).getSnapshot(DFSUtil.string2Bytes("s0"));
    Snapshot snapshot1 = ((INodeDirectorySnapshottable) fsdir.getINode(dir
        .toString())).getSnapshot(DFSUtil.string2Bytes("s1"));
   
    // Case 2 + Case 3: delete noChangeDirParent, noChangeFile, and
    // metaChangeFile2. Note that when we directly delete a directory, the
    // directory will be converted to an INodeDirectoryWithSnapshot. To make
    // sure the deletion goes through an INodeDirectory, we delete the parent
    // of noChangeDir
    hdfs.delete(noChangeDirParent, true);
    // during the deletion, we add a diff for metaChangeFile2 as its snapshot
    // copy for s1, and we also add diffs for both sub and noChangeDirParent
    checkQuotaUsageComputation(dir, 17L, BLOCKSIZE * REPLICATION * 4);
   
    // check the snapshot copy of noChangeDir
    Path snapshotNoChangeDir = SnapshotTestHelper.getSnapshotPath(dir, "s1",
        sub.getName() + "/" + noChangeDirParent.getName() + "/"
            + noChangeDir.getName());
    INodeDirectory snapshotNode =
        (INodeDirectory) fsdir.getINode(snapshotNoChangeDir.toString());
    // should still be an INodeDirectory
    assertEquals(INodeDirectory.class, snapshotNode.getClass());
    ReadOnlyList<INode> children = snapshotNode.getChildrenList(null);
    // check 2 children: noChangeFile and metaChangeFile2
    assertEquals(2, children.size());
    INode noChangeFileSCopy = children.get(1);
    assertEquals(noChangeFile.getName(), noChangeFileSCopy.getLocalName());
    assertEquals(INodeFile.class, noChangeFileSCopy.getClass());
    TestSnapshotBlocksMap.assertBlockCollection(new Path(snapshotNoChangeDir,
        noChangeFileSCopy.getLocalName()).toString(), 1, fsdir, blockmanager);
   
    INodeFileWithSnapshot metaChangeFile2SCopy =
        (INodeFileWithSnapshot) children.get(0);
    assertEquals(metaChangeFile2.getName(), metaChangeFile2SCopy.getLocalName());
    assertEquals(INodeFileWithSnapshot.class, metaChangeFile2SCopy.getClass());
    TestSnapshotBlocksMap.assertBlockCollection(new Path(snapshotNoChangeDir,
        metaChangeFile2SCopy.getLocalName()).toString(), 1, fsdir, blockmanager);
   
    // check the replication factor of metaChangeFile2SCopy
    assertEquals(REPLICATION_1,
        metaChangeFile2SCopy.getFileReplication(null));
    assertEquals(REPLICATION_1,
        metaChangeFile2SCopy.getFileReplication(snapshot1));
    assertEquals(REPLICATION,
        metaChangeFile2SCopy.getFileReplication(snapshot0));
   
    // Case 4: delete directory sub
    // before deleting sub, we first create a new file under sub
    Path newFile = new Path(sub, "newFile");
    DFSTestUtil.createFile(hdfs, newFile, BLOCKSIZE, REPLICATION, seed);
    final INodeFile newFileNode = TestSnapshotBlocksMap.assertBlockCollection(
        newFile.toString(), 1, fsdir, blockmanager);
    blocks = newFileNode.getBlocks();
    checkQuotaUsageComputation(dir, 18L, BLOCKSIZE * REPLICATION * 5);
    hdfs.delete(sub, true);
    // during the deletion, we add diffs for subsub and metaChangeFile1, and
    // remove newFile
    checkQuotaUsageComputation(dir, 19L, BLOCKSIZE * REPLICATION * 4);
 
    // ... remainder of test elided

    Path toDeleteFile = new Path(metaChangeDir, "toDeleteFile");
    DFSTestUtil.createFile(hdfs, noChangeFile, BLOCKSIZE, REPLICATION, seed);
    DFSTestUtil.createFile(hdfs, metaChangeFile, BLOCKSIZE, REPLICATION, seed);
    DFSTestUtil.createFile(hdfs, toDeleteFile, BLOCKSIZE, REPLICATION, seed);
   
    final INodeFile toDeleteFileNode = TestSnapshotBlocksMap
        .assertBlockCollection(toDeleteFile.toString(), 1, fsdir, blockmanager);
    BlockInfo[] blocks = toDeleteFileNode.getBlocks();
   
    // create snapshot s0 on dir
    SnapshotTestHelper.createSnapshot(hdfs, dir, "s0");
    checkQuotaUsageComputation(dir, 8, 3 * BLOCKSIZE * REPLICATION);
   
    // delete /TestSnapshot/sub/noChangeDir/metaChangeDir/toDeleteFile
    hdfs.delete(toDeleteFile, true);
    // the deletion adds diffs for toDeleteFile and metaChangeDir
    checkQuotaUsageComputation(dir, 10, 3 * BLOCKSIZE * REPLICATION);
    // change metadata of /TestSnapshot/sub/noChangeDir/metaChangeDir and
    // /TestSnapshot/sub/noChangeDir/metaChangeFile
    hdfs.setReplication(metaChangeFile, REPLICATION_1);
    hdfs.setOwner(metaChangeDir, "unknown", "unknown");
    checkQuotaUsageComputation(dir, 11, 3 * BLOCKSIZE * REPLICATION);
   
    // create snapshot s1 on dir
    hdfs.createSnapshot(dir, "s1");
    checkQuotaUsageComputation(dir, 12, 3 * BLOCKSIZE * REPLICATION);
   
    // delete snapshot s0
    hdfs.deleteSnapshot(dir, "s0");
    // namespace: remove toDeleteFile and its diff, metaChangeFile's diff,
    // metaChangeDir's diff, dir's diff. diskspace: remove toDeleteFile, and
    // metaChangeFile's replication factor decreases
    checkQuotaUsageComputation(dir, 7, 2 * BLOCKSIZE * REPLICATION - BLOCKSIZE);
    for (BlockInfo b : blocks) {
      assertNull(blockmanager.getBlockCollection(b));
    }
   
    // check 1: there is no snapshot s0
    final INodeDirectorySnapshottable dirNode =
        (INodeDirectorySnapshottable) fsdir.getINode(dir.toString());
    Snapshot snapshot0 = dirNode.getSnapshot(DFSUtil.string2Bytes("s0"));
    assertNull(snapshot0);
    DirectoryDiffList diffList = dirNode.getDiffs();
    assertEquals(1, diffList.asList().size());
    assertEquals("s1", diffList.getLast().snapshot.getRoot().getLocalName());
    diffList = ((INodeDirectoryWithSnapshot) fsdir.getINode(
        metaChangeDir.toString())).getDiffs();
    assertEquals(0, diffList.asList().size());
   
    // check 2: noChangeDir and noChangeFile are still there
    final INodeDirectory noChangeDirNode =
        (INodeDirectory) fsdir.getINode(noChangeDir.toString());
    assertEquals(INodeDirectory.class, noChangeDirNode.getClass());
    final INodeFile noChangeFileNode =
        (INodeFile) fsdir.getINode(noChangeFile.toString());
    assertEquals(INodeFile.class, noChangeFileNode.getClass());
    TestSnapshotBlocksMap.assertBlockCollection(noChangeFile.toString(), 1,
        fsdir, blockmanager);
   
    // check 3: current metadata of metaChangeFile and metaChangeDir
    FileStatus status = hdfs.getFileStatus(metaChangeDir);
    // ... remainder of test elided


