Package org.apache.hadoop.hdfs.server.namenode

Examples of org.apache.hadoop.hdfs.server.namenode.INodeDirectory
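
The excerpts on this page come from the HDFS NameNode sources and their unit tests. As a quick orientation, here is a minimal sketch of constructing an INodeDirectory directly, using only calls that appear in the excerpts below (PermissionStatus.createImmutable, DFSUtil.string2Bytes, the id/name/permission constructor, getLocalName, isSnapshottable). INodeDirectory is NameNode-internal and its constructor signatures differ between Hadoop releases, so treat this as an illustrative sketch rather than stable API; the class name, inode id, and directory name are made up for the example.

    import org.apache.hadoop.fs.permission.FsPermission;
    import org.apache.hadoop.fs.permission.PermissionStatus;
    import org.apache.hadoop.hdfs.DFSUtil;
    import org.apache.hadoop.hdfs.server.namenode.INodeDirectory;

    class INodeDirectorySketch {
      public static void main(String[] args) {
        // Immutable owner/group/mode triple, as in the testIdCmp excerpt below.
        PermissionStatus perm = PermissionStatus.createImmutable(
            "user", "group", FsPermission.createImmutable((short) 0755));

        // Constructor used in the excerpts: inode id, local name bytes,
        // permission status, and modification time.
        INodeDirectory dir = new INodeDirectory(
            1000L, DFSUtil.string2Bytes("example"), perm, 0L);

        System.out.println(dir.getLocalName());    // "example"
        System.out.println(dir.isSnapshottable()); // false for a plain directory
      }
    }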


                         // Excerpt: generate synthetic directory and file inodes and
                         // record the matching mkdir/open/close operations in the
                         // NameNode edit log.
                         int blocksPerFile, long startingBlockId,
                         FileNameGenerator nameGenerator) {

    // Every generated inode shares the same owner, group, and permissions.
    PermissionStatus p = new PermissionStatus("joeDoe", "people",
                                      new FsPermission((short)0777));
    INodeDirectory dirInode = new INodeDirectory(p, 0L);
    editLog.logMkDir(BASE_PATH, dirInode);
    long blockSize = 10;
    BlockInfo[] blocks = new BlockInfo[blocksPerFile];
    for (int iB = 0; iB < blocksPerFile; ++iB) {
      blocks[iB] =
       new BlockInfo(new Block(0, blockSize, GenerationStamp.FIRST_VALID_STAMP),
                               replication);
    }
   
    long currentBlockId = startingBlockId;
    long bidAtSync = startingBlockId;

    for (int iF = 0; iF < numFiles; iF++) {
      for (int iB = 0; iB < blocksPerFile; ++iB) {
         blocks[iB].setBlockId(currentBlockId++);
      }

      try {

        INodeFileUnderConstruction inode = new INodeFileUnderConstruction(
                      null, replication, 0, blockSize, blocks, p, "", "", null);
        // Append path to filename with information about blockIDs
        String path = "_" + iF + "_B" + blocks[0].getBlockId() +
                      "_to_B" + blocks[blocksPerFile-1].getBlockId() + "_";
        String filePath = nameGenerator.getNextFileName("");
        filePath = filePath + path;
        // Log the new sub directory in edits
        if ((iF % nameGenerator.getFilesPerDirectory())  == 0) {
          String currentDir = nameGenerator.getCurrentDir();
          dirInode = new INodeDirectory(p, 0L);
          editLog.logMkDir(currentDir, dirInode);
        }
        editLog.logOpenFile(filePath, inode);
        editLog.logCloseFile(filePath, inode);


    // Excerpt from a snapshot test: toggle the snapshottable flag of a directory
    // (and of the root) with allowSnapshot/disallowSnapshot, verifying the result
    // through the corresponding INodeDirectory.
    final Path file0 = new Path(dir, "file0");
    final Path file1 = new Path(dir, "file1");
    DFSTestUtil.createFile(hdfs, file0, BLOCKSIZE, REPLICATION, seed);
    DFSTestUtil.createFile(hdfs, file1, BLOCKSIZE, REPLICATION, seed);
   
    INodeDirectory dirNode = fsdir.getINode4Write(dir.toString()).asDirectory();
    assertFalse(dirNode.isSnapshottable());
   
    hdfs.allowSnapshot(dir);
    dirNode = fsdir.getINode4Write(dir.toString()).asDirectory();
    assertTrue(dirNode.isSnapshottable());
    // call allowSnapshot again
    hdfs.allowSnapshot(dir);
    dirNode = fsdir.getINode4Write(dir.toString()).asDirectory();
    assertTrue(dirNode.isSnapshottable());
   
    // disallowSnapshot on dir
    hdfs.disallowSnapshot(dir);
    dirNode = fsdir.getINode4Write(dir.toString()).asDirectory();
    assertFalse(dirNode.isSnapshottable());
    // do it again
    hdfs.disallowSnapshot(dir);
    dirNode = fsdir.getINode4Write(dir.toString()).asDirectory();
    assertFalse(dirNode.isSnapshottable());
   
    // same process on root
   
    final Path root = new Path("/");
    INodeDirectory rootNode = fsdir.getINode4Write(root.toString())
        .asDirectory();
    assertTrue(rootNode.isSnapshottable());
    // root is snapshottable dir, but with 0 snapshot quota
    assertEquals(0, ((INodeDirectorySnapshottable) rootNode).getSnapshotQuota());
   
    hdfs.allowSnapshot(root);
    rootNode = fsdir.getINode4Write(root.toString()).asDirectory();
    assertTrue(rootNode.isSnapshottable());
    assertEquals(INodeDirectorySnapshottable.SNAPSHOT_LIMIT,
        ((INodeDirectorySnapshottable) rootNode).getSnapshotQuota());
    // call allowSnapshot again
    hdfs.allowSnapshot(root);
    rootNode = fsdir.getINode4Write(root.toString()).asDirectory();
    assertTrue(rootNode.isSnapshottable());
    assertEquals(INodeDirectorySnapshottable.SNAPSHOT_LIMIT,
        ((INodeDirectorySnapshottable) rootNode).getSnapshotQuota());
   
    // disallowSnapshot on root
    hdfs.disallowSnapshot(root);
    rootNode = fsdir.getINode4Write(root.toString()).asDirectory();
    assertTrue(rootNode.isSnapshottable());
    assertEquals(0, ((INodeDirectorySnapshottable) rootNode).getSnapshotQuota());
    // do it again
    hdfs.disallowSnapshot(root);
    rootNode = fsdir.getINode4Write(root.toString()).asDirectory();
    assertTrue(rootNode.isSnapshottable());
    assertEquals(0, ((INodeDirectorySnapshottable) rootNode).getSnapshotQuota());
  }

  /** Test the snapshot ID comparator. */
  @Test (timeout=300000)
  public void testIdCmp() {
    final PermissionStatus perm = PermissionStatus.createImmutable(
        "user", "group", FsPermission.createImmutable((short)0));
    final INodeDirectory dir = new INodeDirectory(0,
        DFSUtil.string2Bytes("foo"), perm, 0L);
    final INodeDirectorySnapshottable snapshottable
        = new INodeDirectorySnapshottable(dir);
    final Snapshot[] snapshots = {
      new Snapshot(1, "s1", snapshottable),

   
    // Excerpt from a snapshot-deletion test: verify that the snapshot copy of an
    // unchanged directory keeps its children, that replication changes are
    // recorded per snapshot, and that deleting a subtree releases blocks that no
    // snapshot still references.
    // check the snapshot copy of noChangeDir
    Path snapshotNoChangeDir = SnapshotTestHelper.getSnapshotPath(dir, "s1",
        sub.getName() + "/" + noChangeDirParent.getName() + "/"
            + noChangeDir.getName());
    INodeDirectory snapshotNode =
        (INodeDirectory) fsdir.getINode(snapshotNoChangeDir.toString());
    // should still be an INodeDirectory
    assertEquals(INodeDirectory.class, snapshotNode.getClass());
    ReadOnlyList<INode> children = snapshotNode.getChildrenList(null);
    // check 2 children: noChangeFile and metaChangeFile2
    assertEquals(2, children.size());
    INode noChangeFileSCopy = children.get(1);
    assertEquals(noChangeFile.getName(), noChangeFileSCopy.getLocalName());
    assertEquals(INodeFile.class, noChangeFileSCopy.getClass());
    TestSnapshotBlocksMap.assertBlockCollection(new Path(snapshotNoChangeDir,
        noChangeFileSCopy.getLocalName()).toString(), 1, fsdir, blockmanager);
   
    INodeFileWithSnapshot metaChangeFile2SCopy =
        (INodeFileWithSnapshot) children.get(0);
    assertEquals(metaChangeFile2.getName(), metaChangeFile2SCopy.getLocalName());
    assertEquals(INodeFileWithSnapshot.class, metaChangeFile2SCopy.getClass());
    TestSnapshotBlocksMap.assertBlockCollection(new Path(snapshotNoChangeDir,
        metaChangeFile2SCopy.getLocalName()).toString(), 1, fsdir, blockmanager);
   
    // check the replication factor of metaChangeFile2SCopy
    assertEquals(REPLICATION_1,
        metaChangeFile2SCopy.getFileReplication(null));
    assertEquals(REPLICATION_1,
        metaChangeFile2SCopy.getFileReplication(snapshot1));
    assertEquals(REPLICATION,
        metaChangeFile2SCopy.getFileReplication(snapshot0));
   
    // Case 4: delete directory sub
    // before deleting sub, we first create a new file under sub
    Path newFile = new Path(sub, "newFile");
    DFSTestUtil.createFile(hdfs, newFile, BLOCKSIZE, REPLICATION, seed);
    final INodeFile newFileNode = TestSnapshotBlocksMap.assertBlockCollection(
        newFile.toString(), 1, fsdir, blockmanager);
    blocks = newFileNode.getBlocks();
    checkQuotaUsageComputation(dir, 18L, BLOCKSIZE * REPLICATION * 5);
    hdfs.delete(sub, true);
    // while deletion, we add diff for subsub and metaChangeFile1, and remove
    // newFile
    checkQuotaUsageComputation(dir, 19L, BLOCKSIZE * REPLICATION * 4);
    for (BlockInfo b : blocks) {
      assertNull(blockmanager.getBlockCollection(b));
    }
   
    // make sure the whole subtree of sub is stored correctly in snapshot
    Path snapshotSub = SnapshotTestHelper.getSnapshotPath(dir, "s1",
        sub.getName());
    INodeDirectoryWithSnapshot snapshotNode4Sub =
        (INodeDirectoryWithSnapshot) fsdir.getINode(snapshotSub.toString());
    assertEquals(INodeDirectoryWithSnapshot.class, snapshotNode4Sub.getClass());
    // the snapshot copy of sub has only one child subsub.
    // newFile should have been destroyed
    assertEquals(1, snapshotNode4Sub.getChildrenList(null).size());
    // but should have two children, subsub and noChangeDir, when s1 was taken 
    assertEquals(2, snapshotNode4Sub.getChildrenList(snapshot1).size());
   
    // check the snapshot copy of subsub, which is contained in the subtree of
    // sub's snapshot copy
    INode snapshotNode4Subsub = snapshotNode4Sub.getChildrenList(null).get(0);
    assertEquals(INodeDirectoryWithSnapshot.class,
        snapshotNode4Subsub.getClass());
    assertTrue(snapshotNode4Sub == snapshotNode4Subsub.getParent());
    // check the children of subsub
    INodeDirectory snapshotSubsubDir = (INodeDirectory) snapshotNode4Subsub;
    children = snapshotSubsubDir.getChildrenList(null);
    assertEquals(2, children.size());
    assertEquals(children.get(0).getLocalName(), metaChangeFile1.getName());
    assertEquals(children.get(1).getLocalName(), newFileAfterS0.getName());
    // only one child before snapshot s0
    children = snapshotSubsubDir.getChildrenList(snapshot0);
    assertEquals(1, children.size());
    INode child = children.get(0);
    assertEquals(child.getLocalName(), metaChangeFile1.getName());
    // check snapshot copy of metaChangeFile1
    assertEquals(INodeFileWithSnapshot.class, child.getClass());

    // Excerpt: the directory whose metadata changed has no remaining snapshot
    // diffs, while the unchanged directory and file are still present with
    // their blocks.
    diffList = ((INodeDirectoryWithSnapshot) fsdir.getINode(
        metaChangeDir.toString())).getDiffs();
    assertEquals(0, diffList.asList().size());
   
    // check 2. noChangeDir and noChangeFile are still there
    final INodeDirectory noChangeDirNode =
        (INodeDirectory) fsdir.getINode(noChangeDir.toString());
    assertEquals(INodeDirectory.class, noChangeDirNode.getClass());
    final INodeFile noChangeFileNode =
        (INodeFile) fsdir.getINode(noChangeFile.toString());
    assertEquals(INodeFile.class, noChangeFileNode.getClass());
    TestSnapshotBlocksMap.assertBlockCollection(noChangeFile.toString(), 1,
        fsdir, blockmanager);

    return w;
  }

  /** Create an INodeDirectory whose name is "n" followed by a zero-padded number. */
  static INode newINode(int n, int width) {
    byte[] name = DFSUtil.string2Bytes(String.format("n%0" + width + "d", n));
    return new INodeDirectory(n, name, PERM, 0L);
  }

  /**
   * Replace the inode at the matching position in {@code current} with a copy
   * whose modification time is incremented by one, optionally recording the
   * change in {@code diff}.
   */
  static void modify(INode inode, final List<INode> current,
      Diff<byte[], INode> diff) {
    final int i = Diff.search(current, inode.getKey());
    Assert.assertTrue(i >= 0);
    final INodeDirectory oldinode = (INodeDirectory)current.get(i);
    final INodeDirectory newinode = new INodeDirectory(oldinode, false);
    newinode.setModificationTime(oldinode.getModificationTime() + 1);

    current.set(i, newinode);
    if (diff != null) {
      // test undo with probability 1/UNDO_TEST_P
      final boolean testUndo = RANDOM.nextInt(UNDO_TEST_P) == 0;

  /**
   * Reset the given snapshottable directory to non-snapshottable.
   *
   * @throws SnapshotException if there are snapshots in the directory.
   */
  public void resetSnapshottable(final String path) throws IOException {
    final INodesInPath iip = fsdir.getINodesInPath4Write(path);
    final INodeDirectory d = INodeDirectory.valueOf(iip.getLastINode(), path);
    if (!d.isSnapshottable()) {
      // the directory is already non-snapshottable
      return;
    }
    final INodeDirectorySnapshottable s = (INodeDirectorySnapshottable) d;
    if (s.getNumSnapshots() > 0) {

  /** Maximum number of snapshots allowed per snapshottable directory. */
  static final int SNAPSHOT_LIMIT = 1 << 16;

  /** Cast INode to INodeDirectorySnapshottable. */
  static public INodeDirectorySnapshottable valueOf(
      INode inode, String src) throws IOException {
    final INodeDirectory dir = INodeDirectory.valueOf(inode, src);
    if (!dir.isSnapshottable()) {
      throw new SnapshotException(
          "Directory is not a snapshottable directory: " + src);
    }
    return (INodeDirectorySnapshottable)dir;
  }
