Examples of INodeFile


Examples of org.apache.hadoop.hdfs.server.namenode.INodeFile

    Path toDeleteFile = new Path(metaChangeDir, "toDeleteFile");
    DFSTestUtil.createFile(hdfs, noChangeFile, BLOCKSIZE, REPLICATION, seed);
    DFSTestUtil.createFile(hdfs, metaChangeFile, BLOCKSIZE, REPLICATION, seed);
    DFSTestUtil.createFile(hdfs, toDeleteFile, BLOCKSIZE, REPLICATION, seed);
   
    final INodeFile toDeleteFileNode = TestSnapshotBlocksMap
        .assertBlockCollection(toDeleteFile.toString(), 1, fsdir, blockmanager);
    BlockInfo[] blocks = toDeleteFileNode.getBlocks();
   
    // create snapshot s0 on dir
    SnapshotTestHelper.createSnapshot(hdfs, dir, "s0");
    checkQuotaUsageComputation(dir, 8, 3 * BLOCKSIZE * REPLICATION);
   
    // delete /TestSnapshot/sub/noChangeDir/metaChangeDir/toDeleteFile
    hdfs.delete(toDeleteFile, true);
    // the deletion adds diff entries for toDeleteFile and metaChangeDir
    checkQuotaUsageComputation(dir, 10, 3 * BLOCKSIZE * REPLICATION);
    // change metadata of /TestSnapshot/sub/noChangeDir/metaChangeDir and
    // /TestSnapshot/sub/noChangeDir/metaChangeFile
    hdfs.setReplication(metaChangeFile, REPLICATION_1);
    hdfs.setOwner(metaChangeDir, "unknown", "unknown");
    checkQuotaUsageComputation(dir, 11, 3 * BLOCKSIZE * REPLICATION);
   
    // create snapshot s1 on dir
    hdfs.createSnapshot(dir, "s1");
    checkQuotaUsageComputation(dir, 12, 3 * BLOCKSIZE * REPLICATION);
   
    // delete snapshot s0
    hdfs.deleteSnapshot(dir, "s0");
    // namespace: remove toDeleteFile and its diff, metaChangeFile's diff,
    // metaChangeDir's diff, dir's diff. diskspace: remove toDeleteFile, and
    // metaChangeFile's replication factor decreases
    checkQuotaUsageComputation(dir, 7, 2 * BLOCKSIZE * REPLICATION - BLOCKSIZE);
    for (BlockInfo b : blocks) {
      assertNull(blockmanager.getBlockCollection(b));
    }
   
    // check 1. there is no snapshot s0
    final INodeDirectorySnapshottable dirNode =
        (INodeDirectorySnapshottable) fsdir.getINode(dir.toString());
    Snapshot snapshot0 = dirNode.getSnapshot(DFSUtil.string2Bytes("s0"));
    assertNull(snapshot0);
    DirectoryDiffList diffList = dirNode.getDiffs();
    assertEquals(1, diffList.asList().size());
    assertEquals("s1", diffList.getLast().snapshot.getRoot().getLocalName());
    diffList = ((INodeDirectoryWithSnapshot) fsdir.getINode(
        metaChangeDir.toString())).getDiffs();
    assertEquals(0, diffList.asList().size());
   
    // check 2. noChangeDir and noChangeFile are still there
    final INodeDirectory noChangeDirNode =
        (INodeDirectory) fsdir.getINode(noChangeDir.toString());
    assertEquals(INodeDirectory.class, noChangeDirNode.getClass());
    final INodeFile noChangeFileNode =
        (INodeFile) fsdir.getINode(noChangeFile.toString());
    assertEquals(INodeFile.class, noChangeFileNode.getClass());
    TestSnapshotBlocksMap.assertBlockCollection(noChangeFile.toString(), 1,
        fsdir, blockmanager);
   
    // check 3: current metadata of metaChangeFile and metaChangeDir
    FileStatus status = hdfs.getFileStatus(metaChangeDir);
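
The excerpt above asserts that the deleted file's blocks have left the blocksMap. A minimal companion sketch of the inverse check for a file that is still live, reusing the excerpt's noChangeFile, fsdir, and blockmanager (this mirrors what the assertBlockCollection helper does; it is a sketch, not part of the original test):

    INodeFile liveNode = INodeFile.valueOf(
        fsdir.getINode(noChangeFile.toString()), noChangeFile.toString());
    for (BlockInfo b : liveNode.getBlocks()) {
      // a live file's blocks must still resolve to a block collection
      assertNotNull(blockmanager.getBlockCollection(b));
    }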

Examples of org.apache.hadoop.hdfs.server.namenode.INodeFile

        modDirStr + "file15");
    FileStatus statusBeforeDeletion10 = hdfs.getFileStatus(file10_s1);
    FileStatus statusBeforeDeletion11 = hdfs.getFileStatus(file11_s1);
    FileStatus statusBeforeDeletion12 = hdfs.getFileStatus(file12_s1);
    FileStatus statusBeforeDeletion13 = hdfs.getFileStatus(file13_s1);
    INodeFile file14Node = TestSnapshotBlocksMap.assertBlockCollection(
        file14_s2.toString(), 1, fsdir, blockmanager);
    BlockInfo[] blocks_14 = file14Node.getBlocks();
    TestSnapshotBlocksMap.assertBlockCollection(file15_s2.toString(), 1, fsdir,
        blockmanager);
   
    // delete s2; this requires combining s2's diff into s1
    hdfs.deleteSnapshot(snapshotRoot, "s2");
    checkQuotaUsageComputation(snapshotRoot, dirNodeNum + 12 + delta,
        14 * BLOCKSIZE);
   
    // check the correctness of s1
    FileStatus statusAfterDeletion10 = hdfs.getFileStatus(file10_s1);
    FileStatus statusAfterDeletion11 = hdfs.getFileStatus(file11_s1);
    FileStatus statusAfterDeletion12 = hdfs.getFileStatus(file12_s1);
    FileStatus statusAfterDeletion13 = hdfs.getFileStatus(file13_s1);
    assertEquals(statusBeforeDeletion10.toString(),
        statusAfterDeletion10.toString());
    assertEquals(statusBeforeDeletion11.toString(),
        statusAfterDeletion11.toString());
    assertEquals(statusBeforeDeletion12.toString(),
        statusAfterDeletion12.toString());
    assertEquals(statusBeforeDeletion13.toString(),
        statusAfterDeletion13.toString());
    TestSnapshotBlocksMap.assertBlockCollection(file10_s1.toString(), 1, fsdir,
        blockmanager);
    TestSnapshotBlocksMap.assertBlockCollection(file11_s1.toString(), 1, fsdir,
        blockmanager);
    TestSnapshotBlocksMap.assertBlockCollection(file12_s1.toString(), 1, fsdir,
        blockmanager);
    TestSnapshotBlocksMap.assertBlockCollection(file13_s1.toString(), 1, fsdir,
        blockmanager);
   
    // make sure file14 and file15 are not included in s1
    Path file14_s1 = SnapshotTestHelper.getSnapshotPath(snapshotRoot, "s1",
        modDirStr + "file14");
    Path file15_s1 = SnapshotTestHelper.getSnapshotPath(snapshotRoot, "s1",
        modDirStr + "file15");
    assertFalse(hdfs.exists(file14_s1));
    assertFalse(hdfs.exists(file15_s1));
    for (BlockInfo b : blocks_14) {
      assertNull(blockmanager.getBlockCollection(b));
    }
   
    INodeFile nodeFile13 = (INodeFile) fsdir.getINode(file13.toString());
    assertEquals(REPLICATION_1, nodeFile13.getBlockReplication());
    TestSnapshotBlocksMap.assertBlockCollection(file13.toString(), 1, fsdir,
        blockmanager);
   
    INodeFile nodeFile12 = (INodeFile) fsdir.getINode(file12_s1.toString());
    assertEquals(REPLICATION_1, nodeFile12.getBlockReplication());
  }
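
The four before/after comparisons above repeat one pattern; a small helper sketch that captures it (not part of the original test, which inlines the assertions):

    private static void assertStatusUnchanged(FileStatus before, FileStatus after) {
      // the original test compares FileStatus#toString output, relying on it
      // to summarize the status fields in one string
      assertEquals(before.toString(), after.toString());
    }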

Examples of org.apache.hadoop.hdfs.server.namenode.INodeFile

    // stored in the deleted list of foo, and will be destroyed.
    hdfs.delete(foo2, true);
   
    // check if /dir3/bar still exists
    assertTrue(hdfs.exists(bar3));
    INodeFile barNode = (INodeFile) fsdir.getINode4Write(bar3.toString());
    assertSame(fsdir.getINode4Write(dir3.toString()), barNode.getParent());
  }
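
A hedged extension of the parent check, written as if appended to the test body above; it assumes the snapshot-era INodeDirectory#getChild(byte[], Snapshot) lookup, where a null snapshot means the current tree:

    INodeFile barNode3 = (INodeFile) fsdir.getINode4Write(bar3.toString());
    // the parent should resolve the child back under its local name
    assertSame(barNode3, barNode3.getParent().getChild(
        barNode3.getLocalNameBytes(), null));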

Examples of org.apache.hadoop.hdfs.server.namenode.INodeFile

      hdfs.setReplication(filePath, (short) DATANODE_COUNT);
      BlockManagerTestUtil.computeAllPendingWork(blkManager);

      assertEquals(1, blkManager.pendingReplications.size());
      INodeFile fileNode = fsn.getFSDirectory().getINode4Write(file).asFile();
      Block[] blocks = fileNode.getBlocks();
      assertEquals(DATANODE_COUNT - 1,
          blkManager.pendingReplications.getNumReplicas(blocks[0]));

      LocatedBlock locatedBlock = hdfs.getClient().getLocatedBlocks(file, 0)
          .get(0);
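
Beyond the pending-replication count, each of the file's blocks can be checked to map back to its inode; a minimal sketch (not in the original test) reusing the excerpt's fileNode, blocks, and blkManager:

    for (Block b : blocks) {
      // every block of the live file resolves to its owning INodeFile
      assertSame(fileNode, blkManager.getBlockCollection(b));
    }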

Examples of org.apache.hadoop.hdfs.server.namenode.INodeFile

    // 1. create snapshot --> create file --> append
    SnapshotTestHelper.createSnapshot(hdfs, dir, "s0");
    DFSTestUtil.createFile(hdfs, file, BLOCKSIZE, REPLICATION, seed);
    DFSTestUtil.appendFile(hdfs, file, BLOCKSIZE);
   
    INodeFile fileNode = (INodeFile) fsdir.getINode(file.toString());
   
    // 2. create snapshot --> modify the file --> append
    hdfs.createSnapshot(dir, "s1");
    hdfs.setReplication(file, (short) (REPLICATION - 1));
    DFSTestUtil.appendFile(hdfs, file, BLOCKSIZE);
   
    // check corresponding inodes
    fileNode = (INodeFile) fsdir.getINode(file.toString());
    assertEquals(REPLICATION - 1, fileNode.getFileReplication());
    assertEquals(BLOCKSIZE * 3, fileNode.computeFileSize());

    // 3. create snapshot --> append
    hdfs.createSnapshot(dir, "s2");
    DFSTestUtil.appendFile(hdfs, file, BLOCKSIZE);
   
    // check corresponding inodes
    fileNode = (INodeFile) fsdir.getINode(file.toString());
    assertEquals(REPLICATION - 1,  fileNode.getFileReplication());
    assertEquals(BLOCKSIZE * 4, fileNode.computeFileSize());
  }
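
The assertions above cover only the current state; a hedged companion check, written as if appended to the test body, is that each snapshot still reports the file length recorded when it was taken (Snapshot handles fetched the same way as in the first example on this page):

    INodeDirectorySnapshottable dirSnapNode =
        (INodeDirectorySnapshottable) fsdir.getINode(dir.toString());
    Snapshot snap1 = dirSnapNode.getSnapshot(DFSUtil.string2Bytes("s1"));
    Snapshot snap2 = dirSnapNode.getSnapshot(DFSUtil.string2Bytes("s2"));
    INodeFile fNode = (INodeFile) fsdir.getINode(file.toString());
    // s1 was taken at two blocks, s2 after the third append
    assertEquals(BLOCKSIZE * 2, fNode.computeFileSize(snap1));
    assertEquals(BLOCKSIZE * 3, fNode.computeFileSize(snap2));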

Examples of org.apache.hadoop.hdfs.server.namenode.INodeFile

    SnapshotTestHelper.createSnapshot(hdfs, dir, "s0");
    out.close();
   
    // check: an INodeFileUnderConstructionWithSnapshot of size BLOCKSIZE*2
    // should be stored in s0's deleted list
    INodeFile fileNode = (INodeFile) fsdir.getINode(file.toString());
    assertEquals(BLOCKSIZE * 2, fileNode.computeFileSize());
    INodeDirectorySnapshottable dirNode = (INodeDirectorySnapshottable) fsdir
        .getINode(dir.toString());
    DirectoryDiff last = dirNode.getDiffs().getLast();
    Snapshot s0 = last.snapshot;
   
    // 2. append without closing stream
    out = appendFileWithoutClosing(file, BLOCKSIZE);
    out.hsync(EnumSet.of(SyncFlag.UPDATE_LENGTH));
   
    // re-check nodeInDeleted_S0
    dirNode = (INodeDirectorySnapshottable) fsdir.getINode(dir.toString());
    assertEquals(BLOCKSIZE * 2, fileNode.computeFileSize(s0));
   
    // 3. take snapshot --> close stream
    hdfs.createSnapshot(dir, "s1");
    out.close();
   
    // check: an INodeFileUnderConstructionWithSnapshot with size BLOCKSIZE*3 should
    // have been stored in s1's deleted list
    fileNode = (INodeFile) fsdir.getINode(file.toString());
    dirNode = (INodeDirectorySnapshottable) fsdir.getINode(dir.toString());
    last = dirNode.getDiffs().getLast();
    Snapshot s1 = last.snapshot;
    assertTrue(fileNode instanceof INodeFileWithSnapshot);
    assertEquals(BLOCKSIZE * 3, fileNode.computeFileSize(s1));
   
    // 4. modify file --> append without closing stream --> take snapshot -->
    // close stream
    hdfs.setReplication(file, (short) (REPLICATION - 1));
    out = appendFileWithoutClosing(file, BLOCKSIZE);
    hdfs.createSnapshot(dir, "s2");
    out.close();
   
    // re-check the size of nodeInDeleted_S1
    assertEquals(BLOCKSIZE * 3, fileNode.computeFileSize(s1));
  }
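
A short sketch of the final state, written as if appended to the test body above (not part of the original test): the current length now covers all four blocks, while s0 keeps the length recorded at snapshot time.

    INodeFile finalNode = (INodeFile) fsdir.getINode(file.toString());
    // current view: the initial two blocks plus the two appends
    assertEquals(BLOCKSIZE * 4, finalNode.computeFileSize());
    // snapshot view: s0 captured the file at two blocks, as asserted earlier
    Snapshot snap0 = dirNode.getSnapshot(DFSUtil.string2Bytes("s0"));
    assertEquals(BLOCKSIZE * 2, finalNode.computeFileSize(snap0));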

Examples of org.apache.hadoop.hdfs.server.namenode.INodeFile

   * @throws Exception
   */
  private void checkSnapshotFileReplication(Path currentFile,
      Map<Path, Short> snapshotRepMap, short expectedBlockRep) throws Exception {
    // First check the getBlockReplication for the INode of the currentFile
    final INodeFile inodeOfCurrentFile = getINodeFile(currentFile);
    assertEquals(expectedBlockRep, inodeOfCurrentFile.getBlockReplication());
    // Then check replication for every snapshot
    for (Path ss : snapshotRepMap.keySet()) {
      final INodesInPath iip = fsdir.getLastINodeInPath(ss.toString());
      final INodeFile ssInode = (INodeFile)iip.getLastINode();
      // The replication derived from INodeFileWithLink#getBlockReplication
      // should always equal expectedBlockRep
      assertEquals(expectedBlockRep, ssInode.getBlockReplication());
      // Also check the number derived from INodeFile#getFileReplication
      assertEquals(snapshotRepMap.get(ss).shortValue(),
          ssInode.getFileReplication(iip.getPathSnapshot()));
    }
  }
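
A hypothetical invocation of the helper above; the directory and file names and the replication values are assumptions for illustration, not taken from the original test:

    // hypothetical setup: "snapshottableDir" has snapshots s0 and s1 capturing
    // "file1" at replications 3 and 4; the current replication is 4
    Map<Path, Short> repMap = new HashMap<Path, Short>();  // java.util
    repMap.put(SnapshotTestHelper.getSnapshotPath(snapshottableDir, "s0", "file1"), (short) 3);
    repMap.put(SnapshotTestHelper.getSnapshotPath(snapshottableDir, "s1", "file1"), (short) 4);
    // per the excerpt's comments, getBlockReplication should then report 4
    // for the current file and for every snapshot copy
    checkSnapshotFileReplication(new Path(snapshottableDir, "file1"), repMap, (short) 4);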

Examples of org.apache.hadoop.hdfs.server.namenode.INodeFile

   
    // Delete file1
    hdfs.delete(file1, true);
    // Check replication of snapshots
    for (Path ss : snapshotRepMap.keySet()) {
      final INodeFile ssInode = getINodeFile(ss);
      // The replication derived from INodeFileWithLink#getBlockReplication
      // should always equal expectedBlockRep
      assertEquals(REPLICATION, ssInode.getBlockReplication());
      // Also check the number derived from INodeFile#getFileReplication
      assertEquals(snapshotRepMap.get(ss).shortValue(),
          ssInode.getFileReplication());
    }
  }
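
The excerpt relies on a getINodeFile(Path) helper that is not shown; a plausible sketch of it (the test's actual helper may differ):

    private INodeFile getINodeFile(Path p) throws Exception {
      // resolve the path in the FSDirectory and require it to be a file
      return INodeFile.valueOf(fsdir.getINode(p.toString()), p.toString());
    }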

Examples of org.apache.hadoop.hdfs.server.namenode.INodeFile

    }
  }

  static INodeFile assertBlockCollection(String path, int numBlocks,
     final FSDirectory dir, final BlockManager blkManager) throws Exception {
    final INodeFile file = INodeFile.valueOf(dir.getINode(path), path);
    assertEquals(numBlocks, file.getBlocks().length);
    for(BlockInfo b : file.getBlocks()) {
      assertBlockCollection(blkManager, file, b);
    }
    return file;
  }
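
The loop above delegates to a two-argument companion that is not shown in this excerpt; a plausible sketch (the real helper may perform additional checks):

    static void assertBlockCollection(final BlockManager blkManager,
        final INodeFile file, final BlockInfo b) {
      // the stored block should be the very same object the file holds,
      // and it should map back to the file as its block collection
      assertSame(b, blkManager.getStoredBlock(b));
      assertSame(file, blkManager.getBlockCollection(b));
    }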

Examples of org.apache.hadoop.hdfs.server.namenode.INodeFile

    DFSTestUtil.createFile(hdfs, file1, 2*BLOCKSIZE, REPLICATION, seed);
    DFSTestUtil.createFile(hdfs, file2, 3*BLOCKSIZE, REPLICATION, seed);
   
    // Normal deletion
    {
      final INodeFile f2 = assertBlockCollection(file2.toString(), 3, fsdir,
          blockmanager);
      BlockInfo[] blocks = f2.getBlocks();
      hdfs.delete(sub2, true);
      // The INode should have been removed from the blocksMap
      for(BlockInfo b : blocks) {
        assertNull(blockmanager.getBlockCollection(b));
      }
    }
   
    // Create snapshots for sub1
    final String[] snapshots = {"s0", "s1", "s2"};
    DFSTestUtil.createFile(hdfs, file3, 5*BLOCKSIZE, REPLICATION, seed);
    SnapshotTestHelper.createSnapshot(hdfs, sub1, snapshots[0]);
    DFSTestUtil.createFile(hdfs, file4, 1*BLOCKSIZE, REPLICATION, seed);
    SnapshotTestHelper.createSnapshot(hdfs, sub1, snapshots[1]);
    DFSTestUtil.createFile(hdfs, file5, 7*BLOCKSIZE, REPLICATION, seed);
    SnapshotTestHelper.createSnapshot(hdfs, sub1, snapshots[2]);

    // change the replication so that the inode is replaced by an
    // INodeFileWithSnapshot for the snapshots
    {
      INodeFile f1 = assertBlockCollection(file1.toString(), 2, fsdir,
          blockmanager);
      Assert.assertSame(INodeFile.class, f1.getClass());
      hdfs.setReplication(file1, (short)2);
      f1 = assertBlockCollection(file1.toString(), 2, fsdir, blockmanager);
      Assert.assertSame(INodeFileWithSnapshot.class, f1.getClass());
    }
   
    // Check the block information for file0
    final INodeFile f0 = assertBlockCollection(file0.toString(), 4, fsdir,
        blockmanager);
    BlockInfo[] blocks0 = f0.getBlocks();
   
    // Also check the block information for snapshot of file0
    Path snapshotFile0 = SnapshotTestHelper.getSnapshotPath(sub1, "s0",
        file0.getName());
    assertBlockCollection(snapshotFile0.toString(), 4, fsdir, blockmanager);
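
In contrast to the "Normal deletion" block at the top of the excerpt, deleting a file that a snapshot still references should keep its blocks in the blocksMap. A sketch of that follow-up check (not shown in the excerpt), continuing with its file0 and blocks0, which are captured by snapshot s0 above:

    hdfs.delete(file0, true);
    for (BlockInfo b : blocks0) {
      // still referenced from s0, so the blocks must remain in the blocksMap
      assertNotNull(blockmanager.getBlockCollection(b));
    }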