Examples of DistributedFileSystem


Examples of org.apache.hadoop.hdfs.DistributedFileSystem

  @Test
  public void testSaveNamespaceWithRenamedLease() throws Exception {
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(new Configuration())
        .numDataNodes(1).build();
    cluster.waitActive();
    DistributedFileSystem fs = (DistributedFileSystem) cluster.getFileSystem();
    OutputStream out = null;
    try {
      fs.mkdirs(new Path("/test-target"));
      out = fs.create(new Path("/test-source/foo")); // don't close
      fs.rename(new Path("/test-source/"), new Path("/test-target/"));

      fs.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
      cluster.getNameNodeRpc().saveNamespace();
      fs.setSafeMode(SafeModeAction.SAFEMODE_LEAVE);
    } finally {
      IOUtils.cleanup(LOG, out, fs);
      if (cluster != null) {
        cluster.shutdown();
      }
    }
  }
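
The enter-safe-mode / saveNamespace / leave-safe-mode sequence above is the standard checkpoint idiom: the NameNode refuses saveNamespace() unless it is in safe mode. A minimal standalone sketch of the same idiom, assuming fs.defaultFS in the Configuration points at a running NameNode and the caller is the HDFS superuser (class name is illustrative):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction;

public class SaveNamespaceSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // The cast is only valid when fs.defaultFS is an hdfs:// URI.
    DistributedFileSystem dfs = (DistributedFileSystem) FileSystem.get(conf);
    try {
      dfs.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
      dfs.saveNamespace(); // requires safe mode and superuser privilege
    } finally {
      dfs.setSafeMode(SafeModeAction.SAFEMODE_LEAVE);
      dfs.close();
    }
  }
}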

Examples of org.apache.hadoop.hdfs.DistributedFileSystem

  @Test (timeout=30000)
  public void testSaveNamespaceWithDanglingLease() throws Exception {
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(new Configuration())
        .numDataNodes(1).build();
    cluster.waitActive();
    DistributedFileSystem fs = cluster.getFileSystem();
    try {
      cluster.getNamesystem().leaseManager.addLease("me", "/non-existent");     
      fs.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
      cluster.getNameNodeRpc().saveNamespace();
      fs.setSafeMode(SafeModeAction.SAFEMODE_LEAVE);
    } finally {
      if (cluster != null) {
        cluster.shutdown();
      }
    }
  }

Examples of org.apache.hadoop.hdfs.DistributedFileSystem

  private static long addAsUnprivileged(
      final CacheDirectiveInfo directive) throws Exception {
    return unprivilegedUser
        .doAs(new PrivilegedExceptionAction<Long>() {
          @Override
          public Long run() throws IOException {
            DistributedFileSystem myDfs =
                (DistributedFileSystem) FileSystem.get(conf);
            return myDfs.addCacheDirective(directive);
          }
        });
  }
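
The helper above submits addCacheDirective() as an unprivileged user through doAs. For context, a directive like the one passed in is built with CacheDirectiveInfo.Builder; a sketch under the assumption that the pool already exists (pool and path names here are placeholders):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.protocol.CacheDirectiveInfo;

public class AddCacheDirectiveSketch {
  public static void main(String[] args) throws Exception {
    DistributedFileSystem dfs =
        (DistributedFileSystem) FileSystem.get(new Configuration());
    CacheDirectiveInfo directive = new CacheDirectiveInfo.Builder()
        .setPath(new Path("/cached/file"))   // placeholder path
        .setPool("testPool")                 // pool must already exist
        .setReplication((short) 1)
        .build();
    // addCacheDirective returns the id assigned to the new directive
    long id = dfs.addCacheDirective(directive);
    System.out.println("added cache directive " + id);
  }
}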

Examples of org.apache.hadoop.hdfs.DistributedFileSystem

  @Test(timeout=60000)
  public void testListCachePoolPermissions() throws Exception {
    final UserGroupInformation myUser = UserGroupInformation
        .createRemoteUser("myuser");
    final DistributedFileSystem myDfs =
        (DistributedFileSystem)DFSTestUtil.getFileSystemAs(myUser, conf);
    final String poolName = "poolparty";
    dfs.addCachePool(new CachePoolInfo(poolName)
        .setMode(new FsPermission((short)0700)));
    // Should only see partial info
    RemoteIterator<CachePoolEntry> it = myDfs.listCachePools();
    CachePoolInfo info = it.next().getInfo();
    assertFalse(it.hasNext());
    assertEquals("Expected pool name", poolName, info.getPoolName());
    assertNull("Unexpected owner name", info.getOwnerName());
    assertNull("Unexpected group name", info.getGroupName());
    assertNull("Unexpected mode", info.getMode());
    assertNull("Unexpected limit", info.getLimit());
    // Modify the pool so myuser is now the owner
    final long limit = 99;
    dfs.modifyCachePool(new CachePoolInfo(poolName)
        .setOwnerName(myUser.getShortUserName())
        .setLimit(limit));
    // Should see full info
    it = myDfs.listCachePools();
    info = it.next().getInfo();
    assertFalse(it.hasNext());
    assertEquals("Expected pool name", poolName, info.getPoolName());
    assertEquals("Mismatched owner name", myUser.getShortUserName(),
        info.getOwnerName());
  }
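
The test relies on listCachePools() eliding every field except the pool name when the caller lacks read access to the pool. A minimal iteration sketch over the returned RemoteIterator (assuming an HDFS client configuration):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.RemoteIterator;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.protocol.CachePoolEntry;
import org.apache.hadoop.hdfs.protocol.CachePoolInfo;

public class ListCachePoolsSketch {
  public static void main(String[] args) throws Exception {
    DistributedFileSystem dfs =
        (DistributedFileSystem) FileSystem.get(new Configuration());
    RemoteIterator<CachePoolEntry> it = dfs.listCachePools();
    while (it.hasNext()) {
      CachePoolInfo info = it.next().getInfo();
      // Owner, group, mode, and limit come back null for callers
      // without read permission on the pool.
      System.out.println(info.getPoolName() + " owner=" + info.getOwnerName());
    }
  }
}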

Examples of org.apache.hadoop.hdfs.DistributedFileSystem

    MiniDFSCluster cluster = null;
    try {
      cluster = new MiniDFSCluster.Builder(conf).build();
      cluster.waitActive();
      FSNamesystem fsn = cluster.getNamesystem();
      DistributedFileSystem fs = cluster.getFileSystem();

      final Path dir = new Path("/abc/def");
      final Path file1 = new Path(dir, "f1");
      final Path file2 = new Path(dir, "f2");

      // create an empty file f1
      fs.create(file1).close();

      // create an under-construction file f2
      FSDataOutputStream out = fs.create(file2);
      out.writeBytes("hello");
      ((DFSOutputStream) out.getWrappedStream()).hsync(EnumSet
          .of(SyncFlag.UPDATE_LENGTH));

      // checkpoint
      fs.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
      fs.saveNamespace();
      fs.setSafeMode(SafeModeAction.SAFEMODE_LEAVE);

      cluster.restartNameNode();
      cluster.waitActive();
      fs = cluster.getFileSystem();
      fsn = cluster.getNamesystem(); // re-fetch from the restarted NameNode

      assertTrue(fs.isDirectory(dir));
      assertTrue(fs.exists(file1));
      assertTrue(fs.exists(file2));

      // check internals of file2
      INodeFile file2Node = fsn.dir.getINode4Write(file2.toString()).asFile();
      assertEquals("hello".length(), file2Node.computeFileSize());
      assertTrue(file2Node.isUnderConstruction());
    } finally {
      if (cluster != null) {
        cluster.shutdown();
      }
    }
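
The cast to DFSOutputStream above reaches the SyncFlag-aware hsync(); the public wrapper type returned by DistributedFileSystem.create() is HdfsDataOutputStream, which exposes the same overload. A sketch of the idiom (the path is a placeholder, and the cast assumes the FileSystem is HDFS):

import java.util.EnumSet;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.client.HdfsDataOutputStream;
import org.apache.hadoop.hdfs.client.HdfsDataOutputStream.SyncFlag;

public class HsyncUpdateLengthSketch {
  public static void main(String[] args) throws Exception {
    FileSystem fs = FileSystem.get(new Configuration());
    HdfsDataOutputStream out =
        (HdfsDataOutputStream) fs.create(new Path("/hsync-demo"));
    out.writeBytes("hello");
    // UPDATE_LENGTH persists the flushed length on the NameNode as well,
    // so the file's visible size reflects the hsync'ed bytes.
    out.hsync(EnumSet.of(SyncFlag.UPDATE_LENGTH));
    out.close();
  }
}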

Examples of org.apache.hadoop.hdfs.DistributedFileSystem

  public void testDigest() throws IOException {
    Configuration conf = new Configuration();
    MiniDFSCluster cluster = null;
    try {
      cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).build();
      DistributedFileSystem fs = cluster.getFileSystem();
      fs.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
      fs.saveNamespace();
      fs.setSafeMode(SafeModeAction.SAFEMODE_LEAVE);
      File currentDir = FSImageTestUtil.getNameNodeCurrentDirs(cluster, 0).get(
          0);
      File fsimage = FSImageTestUtil.findNewestImageFile(currentDir
          .getAbsolutePath());
      assertEquals(MD5FileUtils.readStoredMd5ForFile(fsimage),
          MD5FileUtils.computeMd5ForFile(fsimage));
    } finally {
      if (cluster != null) {
        cluster.shutdown();
      }
    }
  }

Examples of org.apache.hadoop.hdfs.DistributedFileSystem

    Configuration conf = new Configuration();
    MiniDFSCluster cluster = null;
    try {
      cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
      cluster.waitActive();
      DistributedFileSystem hdfs = cluster.getFileSystem();
      String userDir = hdfs.getHomeDirectory().toUri().getPath().toString();
      Path file = new Path(userDir, "file");
      Path dir = new Path(userDir, "/dir");
      Path link = new Path(userDir, "/link");
      hdfs.createNewFile(file);
      hdfs.mkdirs(dir);
      hdfs.createSymlink(file, link, false);

      long mtimeFile = hdfs.getFileStatus(file).getModificationTime();
      long atimeFile = hdfs.getFileStatus(file).getAccessTime();
      long mtimeDir = hdfs.getFileStatus(dir).getModificationTime();
      long mtimeLink = hdfs.getFileLinkStatus(link).getModificationTime();
      long atimeLink = hdfs.getFileLinkStatus(link).getAccessTime();

      // save namespace and restart cluster
      hdfs.setSafeMode(HdfsConstants.SafeModeAction.SAFEMODE_ENTER);
      hdfs.saveNamespace();
      hdfs.setSafeMode(HdfsConstants.SafeModeAction.SAFEMODE_LEAVE);
      cluster.shutdown();
      cluster = new MiniDFSCluster.Builder(conf).format(false)
          .numDataNodes(1).build();
      cluster.waitActive();
      hdfs = cluster.getFileSystem();
     
      assertEquals(mtimeFile, hdfs.getFileStatus(file).getModificationTime());
      assertEquals(atimeFile, hdfs.getFileStatus(file).getAccessTime());
      assertEquals(mtimeDir, hdfs.getFileStatus(dir).getModificationTime());
      assertEquals(mtimeLink, hdfs.getFileLinkStatus(link).getModificationTime());
      assertEquals(atimeLink, hdfs.getFileLinkStatus(link).getAccessTime());
    } finally {
      if (cluster != null) {
        cluster.shutdown();
      }
    }

Examples of org.apache.hadoop.hdfs.DistributedFileSystem

    final HdfsConfiguration conf = new HdfsConfiguration();
    final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf
        ).numDataNodes(REPLICATION).build();
    try {
      cluster.waitActive();
      final DistributedFileSystem fs = cluster.getFileSystem();

      //create a file, write some data and leave it open.
      final Path p = new Path("/foo");
      final int size = (1 << 16) + RAN.nextInt(1 << 16);
      LOG.info("size = " + size);
      final FSDataOutputStream out = fs.create(p, REPLICATION);
      final byte[] bytes = new byte[1024];
      for(int remaining = size; remaining > 0; ) {
        RAN.nextBytes(bytes);
        final int len = bytes.length < remaining? bytes.length: remaining;
        out.write(bytes, 0, len);
        remaining -= len;
      }
    } finally {
      cluster.shutdown();
    }

Examples of org.apache.hadoop.hdfs.DistributedFileSystem

  /**
   * Tests save namespace.
   */
  @Test
  public void testSaveNamespace() throws IOException {
    MiniDFSCluster cluster = null;
    DistributedFileSystem fs = null;
    FileContext fc;
    try {
      Configuration conf = new HdfsConfiguration();
      cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDatanodes).format(true).build();
      cluster.waitActive();
      fs = (cluster.getFileSystem());
      fc = FileContext.getFileContext(cluster.getURI(0));

      // Saving image without safe mode should fail
      DFSAdmin admin = new DFSAdmin(conf);
      String[] args = new String[]{"-saveNamespace"};
      try {
        admin.run(args);
      } catch(IOException eIO) {
        assertTrue(eIO.getLocalizedMessage().contains("Safe mode should be turned ON"));
      } catch(Exception e) {
        throw new IOException(e);
      }
      // create new file
      Path file = new Path("namespace.dat");
      writeFile(fs, file, replication);
      checkFile(fs, file, replication);

      // create new link
      Path symlink = new Path("file.link");
      fc.createSymlink(file, symlink, false);
      assertTrue(fc.getFileLinkStatus(symlink).isSymlink());

      // verify that the edits file is NOT empty
      Collection<URI> editsDirs = cluster.getNameEditsDirs(0);
      for(URI uri : editsDirs) {
        File ed = new File(uri.getPath());
        assertTrue(new File(ed, "current/"
                            + NNStorage.getInProgressEditsFileName(1))
                   .length() > Integer.SIZE/Byte.SIZE);
      }

      // Saving image in safe mode should succeed
      fs.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
      try {
        admin.run(args);
      } catch(Exception e) {
        throw new IOException(e);
      }
     
      // TODO: Fix the test to not require a hard-coded transaction count.
      final int EXPECTED_TXNS_FIRST_SEG = 13;
     
      // the following steps should have happened:
      //   edits_inprogress_1 -> edits_1-12  (finalized)
      //   fsimage_12 created
      //   edits_inprogress_13 created
      //
      for(URI uri : editsDirs) {
        File ed = new File(uri.getPath());
        File curDir = new File(ed, "current");
        LOG.info("Files in " + curDir + ":\n  " +
            Joiner.on("\n  ").join(curDir.list()));
        // Verify that the first edits file got finalized
        File originalEdits = new File(curDir,
                                      NNStorage.getInProgressEditsFileName(1));
        assertFalse(originalEdits.exists());
        File finalizedEdits = new File(curDir,
            NNStorage.getFinalizedEditsFileName(1, EXPECTED_TXNS_FIRST_SEG));
        GenericTestUtils.assertExists(finalizedEdits);
        assertTrue(finalizedEdits.length() > Integer.SIZE/Byte.SIZE);

        GenericTestUtils.assertExists(new File(ed, "current/"
                       + NNStorage.getInProgressEditsFileName(
                           EXPECTED_TXNS_FIRST_SEG + 1)));
      }
     
      Collection<URI> imageDirs = cluster.getNameDirs(0);
      for (URI uri : imageDirs) {
        File imageDir = new File(uri.getPath());
        File savedImage = new File(imageDir, "current/"
                                   + NNStorage.getImageFileName(
                                       EXPECTED_TXNS_FIRST_SEG));
        assertTrue("Should have saved image at " + savedImage,
            savedImage.exists());       
      }

      // restart cluster and verify file exists
      cluster.shutdown();
      cluster = null;

      cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDatanodes).format(false).build();
      cluster.waitActive();
      fs = (cluster.getFileSystem());
      checkFile(fs, file, replication);
      fc = FileContext.getFileContext(cluster.getURI(0));
      assertTrue(fc.getFileLinkStatus(symlink).isSymlink());
    } finally {
      if(fs != null) fs.close();
      cleanup(cluster);
      cluster = null;
    }
  }
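
The DFSAdmin invocations in this test mirror the dfsadmin command line: saveNamespace fails outside safe mode, so the CLI sequence is -safemode enter, -saveNamespace, -safemode leave. A sketch driving the same Tool programmatically (assuming superuser credentials):

import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.tools.DFSAdmin;

public class DfsAdminSaveNamespaceSketch {
  public static void main(String[] args) throws Exception {
    DFSAdmin admin = new DFSAdmin(new HdfsConfiguration());
    // Equivalent to: hdfs dfsadmin -safemode enter
    admin.run(new String[] {"-safemode", "enter"});
    // Equivalent to: hdfs dfsadmin -saveNamespace
    admin.run(new String[] {"-saveNamespace"});
    // Equivalent to: hdfs dfsadmin -safemode leave
    admin.run(new String[] {"-safemode", "leave"});
  }
}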

Examples of org.apache.hadoop.hdfs.DistributedFileSystem

      cluster.waitActive();

      final FSNamesystem namesystem = cluster.getNamesystem();
      final DatanodeManager dnm = namesystem.getBlockManager().getDatanodeManager();
      List<DataNode> datanodes = cluster.getDataNodes();
      final DistributedFileSystem fs = cluster.getFileSystem();

      // trigger heartbeats in case not already sent
      triggerHeartbeats(datanodes);
     
      // check that all nodes are live and in service
      int expectedTotalLoad = nodes;  // xceiver server adds 1 to load
      int expectedInServiceNodes = nodes;
      int expectedInServiceLoad = nodes;
      assertEquals(nodes, namesystem.getNumLiveDataNodes());
      assertEquals(expectedInServiceNodes, namesystem.getNumDatanodesInService());
      assertEquals(expectedTotalLoad, namesystem.getTotalLoad());
      assertEquals((double)expectedInServiceLoad/expectedInServiceNodes,
          namesystem.getInServiceXceiverAverage(), EPSILON);
     
      // shutdown half the nodes and force a heartbeat check to ensure
      // counts are accurate
      for (int i=0; i < nodes/2; i++) {
        DataNode dn = datanodes.get(i);
        DatanodeDescriptor dnd = dnm.getDatanode(dn.getDatanodeId());
        dn.shutdown();
        dnd.setLastUpdate(0L);
        BlockManagerTestUtil.checkHeartbeat(namesystem.getBlockManager());
        expectedInServiceNodes--;
        assertEquals(expectedInServiceNodes, namesystem.getNumLiveDataNodes());
        assertEquals(expectedInServiceNodes, namesystem.getNumDatanodesInService());
      }

      // restart the nodes to verify that counts are correct after
      // node re-registration
      cluster.restartDataNodes();
      cluster.waitActive();
      datanodes = cluster.getDataNodes();
      expectedInServiceNodes = nodes;
      assertEquals(nodes, datanodes.size());
      assertEquals(nodes, namesystem.getNumLiveDataNodes());
      assertEquals(expectedInServiceNodes, namesystem.getNumDatanodesInService());
      assertEquals(expectedTotalLoad, namesystem.getTotalLoad());
      assertEquals((double)expectedInServiceLoad/expectedInServiceNodes,
          namesystem.getInServiceXceiverAverage(), EPSILON);
     
      // create streams and hsync to force datastreamers to start
      DFSOutputStream[] streams = new DFSOutputStream[fileCount];
      for (int i=0; i < fileCount; i++) {
        streams[i] = (DFSOutputStream)fs.create(new Path("/f"+i), fileRepl)
            .getWrappedStream();
        streams[i].write("1".getBytes());
        streams[i].hsync();
        // the load for writers is 2 because both the write xceiver & packet
        // responder threads are counted in the load
        expectedTotalLoad += 2*fileRepl;
        expectedInServiceLoad += 2*fileRepl;
      }