Examples of DFSAdmin


Examples of org.apache.hadoop.hdfs.tools.DFSAdmin

    final MiniDFSCluster cluster = new MiniDFSCluster(conf, 2, true, null);
    final FileSystem fs = cluster.getFileSystem();
    assertTrue("Not a HDFS: "+fs.getUri(),
                fs instanceof DistributedFileSystem);
    final DistributedFileSystem dfs = (DistributedFileSystem)fs;
    DFSAdmin admin = new DFSAdmin(conf);
   
    try {
      final int fileLen = 1024;
      final short replication = 5;
      final long spaceQuota = fileLen * replication * 15 / 8;

      // 1: create a directory /test and set its quota to be 3
      final Path parent = new Path("/test");
      assertTrue(dfs.mkdirs(parent));
      String[] args = new String[]{"-setQuota", "3", parent.toString()};
      runCommand(admin, args, false);

      //try setting space quota with a 'binary prefix'
      runCommand(admin, false, "-setSpaceQuota", "2t", parent.toString());
      assertEquals(2L<<40, dfs.getContentSummary(parent).getSpaceQuota());
     
      // set diskspace quota to spaceQuota (fileLen * replication * 15 / 8 = 9600 bytes)
      runCommand(admin, false, "-setSpaceQuota",
                 Long.toString(spaceQuota), parent.toString());
     
      // 2: create directory /test/data0
      final Path childDir0 = new Path(parent, "data0");
      assertTrue(dfs.mkdirs(childDir0));

      // 3: create a file /test/datafile0
      final Path childFile0 = new Path(parent, "datafile0");
      DFSTestUtil.createFile(fs, childFile0, fileLen, replication, 0);
     
      // 4: count -q /test
      ContentSummary c = dfs.getContentSummary(parent);
      assertEquals(c.getFileCount()+c.getDirectoryCount(), 3);
      assertEquals(c.getQuota(), 3);
      assertEquals(c.getSpaceConsumed(), fileLen*replication);
      assertEquals(c.getSpaceQuota(), spaceQuota);
     
      // 5: count -q /test/data0
      c = dfs.getContentSummary(childDir0);
      assertEquals(c.getFileCount()+c.getDirectoryCount(), 1);
      assertEquals(c.getQuota(), -1);
      // check disk space consumed
      c = dfs.getContentSummary(parent);
      assertEquals(c.getSpaceConsumed(), fileLen*replication);

      // 6: create a directory /test/data1
      final Path childDir1 = new Path(parent, "data1");
      boolean hasException = false;
      try {
        assertFalse(dfs.mkdirs(childDir1));
      } catch (NSQuotaExceededException e) {
        hasException = true;
      }
      assertTrue(hasException);
     
      OutputStream fout;
     
      // 7: create a file /test/datafile1
      final Path childFile1 = new Path(parent, "datafile1");
      hasException = false;
      try {
        fout = dfs.create(childFile1);
      } catch (NSQuotaExceededException e) {
        hasException = true;
      }
      assertTrue(hasException);
     
      // 8: clear quota /test
      runCommand(admin, new String[]{"-clrQuota", parent.toString()}, false);
      c = dfs.getContentSummary(parent);
      assertEquals(c.getQuota(), -1);
      assertEquals(c.getSpaceQuota(), spaceQuota);
     
      // 9: clear quota /test/data0
      runCommand(admin, new String[]{"-clrQuota", childDir0.toString()}, false);
      c = dfs.getContentSummary(childDir0);
      assertEquals(c.getQuota(), -1);
     
      // 10: create a file /test/datafile1
      fout = dfs.create(childFile1, replication);
     
      // 10.s: but writing fileLen bytes should result in a quota exception
      hasException = false;
      try {
        fout.write(new byte[fileLen]);
        fout.close();
      } catch (DSQuotaExceededException e) {
        hasException = true;
        IOUtils.closeStream(fout);
      }
      assertTrue(hasException);
     
      //delete the file
      dfs.delete(childFile1, false);
     
      // 9.s: clear diskspace quota
      runCommand(admin, false, "-clrSpaceQuota", parent.toString());
      c = dfs.getContentSummary(parent);
      assertEquals(c.getQuota(), -1);
      assertEquals(c.getSpaceQuota(), -1);      
     
      // now creating childFile1 should succeed
      DFSTestUtil.createFile(dfs, childFile1, fileLen, replication, 0);
     
      // 11: set the quota of /test to be 1
      // HADOOP-5872 - we can set quota even if it is immediately violated
      args = new String[]{"-setQuota", "1", parent.toString()};
      runCommand(admin, args, false);
      runCommand(admin, false, "-setSpaceQuota",  // for space quota
                 Integer.toString(fileLen), args[2]);
     
      // 12: set the quota of /test/data0 to be 1
      args = new String[]{"-setQuota", "1", childDir0.toString()};
      runCommand(admin, args, false);
     
      // 13: not able to create a directory under data0
      hasException = false;
      try {
        assertFalse(dfs.mkdirs(new Path(childDir0, "in")));
      } catch (NSQuotaExceededException e) {
        hasException = true;
      }
      assertTrue(hasException);
      c = dfs.getContentSummary(childDir0);
      assertEquals(c.getDirectoryCount()+c.getFileCount(), 1);
      assertEquals(c.getQuota(), 1);
     
      // 14a: set quota on a non-existent directory
      Path nonExistentPath = new Path("/test1");
      assertFalse(dfs.exists(nonExistentPath));
      args = new String[]{"-setQuota", "1", nonExistentPath.toString()};
      runCommand(admin, args, true);
      runCommand(admin, true, "-setSpaceQuota", "1g", // for space quota
                 nonExistentPath.toString());
     
      // 14b: set quota on a file
      assertTrue(dfs.isFile(childFile0));
      args[1] = childFile0.toString();
      runCommand(admin, args, true);
      // same for space quota
      runCommand(admin, true, "-setSpaceQuota", "1t", args[1]);
     
      // 15a: clear quota on a file
      args[0] = "-clrQuota";
      runCommand(admin, args, true);
      runCommand(admin, true, "-clrSpaceQuota", args[1]);
     
      // 15b: clear quota on a non-existent directory
      args[1] = nonExistentPath.toString();
      runCommand(admin, args, true);
      runCommand(admin, true, "-clrSpaceQuota", args[1]);
     
      // 16a: set the quota of /test to be 0
      args = new String[]{"-setQuota", "0", parent.toString()};
      runCommand(admin, args, true);
      runCommand(admin, true, "-setSpaceQuota", "0", args[2]);
     
      // 16b: set the quota of /test to be -1
      args[1] = "-1";
      runCommand(admin, args, true);
      runCommand(admin, true, "-setSpaceQuota", args[1], args[2]);
     
      // 16c: set the quota of /test to be Long.MAX_VALUE+1
      args[1] = String.valueOf(Long.MAX_VALUE+1L);
      runCommand(admin, args, true);
      runCommand(admin, true, "-setSpaceQuota", args[1], args[2]);
     
      // 16d: set the quota of /test to be a non integer
      args[1] = "33aa1.5";
      runCommand(admin, args, true);
      runCommand(admin, true, "-setSpaceQuota", args[1], args[2]);
     
      // 16e: set space quota with a value larger than Long.MAX_VALUE
      runCommand(admin, true, "-setSpaceQuota",
                 (Long.MAX_VALUE/1024/1024 + 1024) + "m", args[2]);
     
      // 17:  setQuota by a non-administrator
      final String username = "userxx";
      UserGroupInformation ugi =
        UserGroupInformation.createUserForTesting(username,
                                                  new String[]{"groupyy"});
     
      final String[] args2 = args.clone(); // need final ref for doAs block
      ugi.doAs(new PrivilegedExceptionAction<Object>() {
        @Override
        public Object run() throws Exception {
          assertEquals("Not running as new user", username,
              UserGroupInformation.getCurrentUser().getShortUserName());
          DFSAdmin userAdmin = new DFSAdmin(conf);
         
          args2[1] = "100";
          runCommand(userAdmin, args2, true);
          runCommand(userAdmin, true, "-setSpaceQuota", "1g", args2[2]);
         
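
Each of the quota excerpts on this page calls a runCommand helper that the page does not show. Judging from the two call shapes used, runCommand(admin, args, expectError) and runCommand(admin, expectError, cmd...), it very likely just invokes DFSAdmin.run (DFSAdmin implements Tool) and asserts on the exit code. A minimal sketch under that assumption, not the test's own definition:

    // Assumed shape of the runCommand helper used throughout these excerpts:
    // run the DFSAdmin command and assert on its exit code.
    private static void runCommand(DFSAdmin admin, boolean expectError,
                                   String... args) throws Exception {
      runCommand(admin, args, expectError);
    }

    private static void runCommand(DFSAdmin admin, String[] args,
                                   boolean expectError) throws Exception {
      final int ret = admin.run(args);   // DFSAdmin.run comes from the Tool interface
      if (expectError) {
        assertEquals(-1, ret);           // the command should have been rejected
      } else {
        assertTrue("command failed: " + java.util.Arrays.toString(args), ret >= 0);
      }
    }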

Examples of org.apache.hadoop.hdfs.tools.DFSAdmin

    final int BLOCK_SIZE = 6 * 1024;
    conf.set("dfs.block.size", Integer.toString(BLOCK_SIZE));
    MiniDFSCluster cluster = new MiniDFSCluster(conf, 3, true, null);
    cluster.waitActive();
    FileSystem fs = cluster.getFileSystem();
    DFSAdmin admin = new DFSAdmin(conf);

    try {
      Path dir = new Path("/test");
      Path file1 = new Path("/test/test1");
      Path file2 = new Path("/test/test2");
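
The excerpt above breaks off inside the try block. Independently of the DFSAdmin command line, quotas can also be set programmatically through DistributedFileSystem.setQuota(path, namespaceQuota, diskspaceQuota); the sketch below shows that API against the setup above, with illustrative quota values that are not taken from the truncated test:

      // Programmatic alternative to "-setQuota"/"-setSpaceQuota"
      final DistributedFileSystem dfs = (DistributedFileSystem) fs;
      assertTrue(dfs.mkdirs(dir));
      // allow at most 10 names and roughly three replicated blocks of raw space
      dfs.setQuota(dir, 10, 3L * BLOCK_SIZE * 3);
      ContentSummary summary = dfs.getContentSummary(dir);
      assertEquals(10, summary.getQuota());
      assertEquals(3L * BLOCK_SIZE * 3, summary.getSpaceQuota());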

Examples of org.apache.hadoop.hdfs.tools.DFSAdmin

    final int BLOCK_SIZE = 6 * 1024;
    conf.set("dfs.block.size", Integer.toString(BLOCK_SIZE));
    MiniDFSCluster cluster = new MiniDFSCluster(conf, 3, true, null);
    cluster.waitActive();
    FileSystem fs = cluster.getFileSystem();
    DFSAdmin admin = new DFSAdmin(conf);

    try {
      Path dir = new Path("/test");
      boolean exceededQuota = false;
      ContentSummary c;

Examples of org.apache.hadoop.hdfs.tools.DFSAdmin

  public void finalizeCluster(Configuration conf) throws Exception {
    if (nameNode == null) {
      throw new IllegalStateException("Attempting to finalize "
                                      + "Namenode but it is not running");
    }
    ToolRunner.run(new DFSAdmin(conf), new String[] {"-finalizeUpgrade"});
  }
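
The same ToolRunner pattern drives any other dfsadmin subcommand; a short illustrative sketch (the helper names below are ours, the flags are standard DFSAdmin options):

  public void refreshNodes(Configuration conf) throws Exception {
    // -refreshNodes re-reads the NameNode's include/exclude host lists
    ToolRunner.run(new DFSAdmin(conf), new String[] {"-refreshNodes"});
  }

  public void enterSafeMode(Configuration conf) throws Exception {
    // -safemode enter puts the NameNode into safe mode
    ToolRunner.run(new DFSAdmin(conf), new String[] {"-safemode", "enter"});
  }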

Examples of org.apache.hadoop.hdfs.tools.DFSAdmin

    final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
    final FileSystem fs = cluster.getFileSystem();
    assertTrue("Not a HDFS: "+fs.getUri(),
                fs instanceof DistributedFileSystem);
    final DistributedFileSystem dfs = (DistributedFileSystem)fs;
    DFSAdmin admin = new DFSAdmin(conf);
   
    try {
      final int fileLen = 1024;
      final short replication = 5;
      final long spaceQuota = fileLen * replication * 15 / 8;

      // 1: create a directory /test and set its quota to be 3
      final Path parent = new Path("/test");
      assertTrue(dfs.mkdirs(parent));
      String[] args = new String[]{"-setQuota", "3", parent.toString()};
      runCommand(admin, args, false);

      //try setting space quota with a 'binary prefix'
      runCommand(admin, false, "-setSpaceQuota", "2t", parent.toString());
      assertEquals(2L<<40, dfs.getContentSummary(parent).getSpaceQuota());
     
      // set diskspace quota to spaceQuota (fileLen * replication * 15 / 8 = 9600 bytes)
      runCommand(admin, false, "-setSpaceQuota",
                 Long.toString(spaceQuota), parent.toString());
     
      // 2: create directory /test/data0
      final Path childDir0 = new Path(parent, "data0");
      assertTrue(dfs.mkdirs(childDir0));

      // 3: create a file /test/datafile0
      final Path childFile0 = new Path(parent, "datafile0");
      DFSTestUtil.createFile(fs, childFile0, fileLen, replication, 0);
     
      // 4: count -q /test
      ContentSummary c = dfs.getContentSummary(parent);
      assertEquals(c.getFileCount()+c.getDirectoryCount(), 3);
      assertEquals(c.getQuota(), 3);
      assertEquals(c.getSpaceConsumed(), fileLen*replication);
      assertEquals(c.getSpaceQuota(), spaceQuota);
     
      // 5: count -q /test/data0
      c = dfs.getContentSummary(childDir0);
      assertEquals(c.getFileCount()+c.getDirectoryCount(), 1);
      assertEquals(c.getQuota(), -1);
      // check disk space consumed
      c = dfs.getContentSummary(parent);
      assertEquals(c.getSpaceConsumed(), fileLen*replication);

      // 6: create a directory /test/data1
      final Path childDir1 = new Path(parent, "data1");
      boolean hasException = false;
      try {
        assertFalse(dfs.mkdirs(childDir1));
      } catch (QuotaExceededException e) {
        hasException = true;
      }
      assertTrue(hasException);
     
      OutputStream fout;
     
      // 7: create a file /test/datafile1
      final Path childFile1 = new Path(parent, "datafile1");
      hasException = false;
      try {
        fout = dfs.create(childFile1);
      } catch (QuotaExceededException e) {
        hasException = true;
      }
      assertTrue(hasException);
     
      // 8: clear quota /test
      runCommand(admin, new String[]{"-clrQuota", parent.toString()}, false);
      c = dfs.getContentSummary(parent);
      assertEquals(c.getQuota(), -1);
      assertEquals(c.getSpaceQuota(), spaceQuota);
     
      // 9: clear quota /test/data0
      runCommand(admin, new String[]{"-clrQuota", childDir0.toString()}, false);
      c = dfs.getContentSummary(childDir0);
      assertEquals(c.getQuota(), -1);
     
      // 10: create a file /test/datafile1
      fout = dfs.create(childFile1, replication);
     
      // 10.s: but writing fileLen bytes should result in a quota exception
      hasException = false;
      try {
        fout.write(new byte[fileLen]);
        fout.close();
      } catch (QuotaExceededException e) {
        hasException = true;
        IOUtils.closeStream(fout);
      }
      assertTrue(hasException);
     
      //delete the file
      dfs.delete(childFile1, false);
     
      // 9.s: clear diskspace quota
      runCommand(admin, false, "-clrSpaceQuota", parent.toString());
      c = dfs.getContentSummary(parent);
      assertEquals(c.getQuota(), -1);
      assertEquals(c.getSpaceQuota(), -1);      
     
      // now creating childFile1 should succeed
      DFSTestUtil.createFile(dfs, childFile1, fileLen, replication, 0);
     
      // 11: set the quota of /test to be 1
      // HADOOP-5872 - we can set quota even if it is immediately violated
      args = new String[]{"-setQuota", "1", parent.toString()};
      runCommand(admin, args, false);
      runCommand(admin, false, "-setSpaceQuota",  // for space quota
                 Integer.toString(fileLen), args[2]);
     
      // 12: set the quota of /test/data0 to be 1
      args = new String[]{"-setQuota", "1", childDir0.toString()};
      runCommand(admin, args, false);
     
      // 13: not able to create a directory under data0
      hasException = false;
      try {
        assertFalse(dfs.mkdirs(new Path(childDir0, "in")));
      } catch (QuotaExceededException e) {
        hasException = true;
      }
      assertTrue(hasException);
      c = dfs.getContentSummary(childDir0);
      assertEquals(c.getDirectoryCount()+c.getFileCount(), 1);
      assertEquals(c.getQuota(), 1);
     
      // 14a: set quota on a non-existent directory
      Path nonExistentPath = new Path("/test1");
      assertFalse(dfs.exists(nonExistentPath));
      args = new String[]{"-setQuota", "1", nonExistentPath.toString()};
      runCommand(admin, args, true);
      runCommand(admin, true, "-setSpaceQuota", "1g", // for space quota
                 nonExistentPath.toString());
     
      // 14b: set quota on a file
      assertTrue(dfs.isFile(childFile0));
      args[1] = childFile0.toString();
      runCommand(admin, args, true);
      // same for space quota
      runCommand(admin, true, "-setSpaceQuota", "1t", args[1]);
     
      // 15a: clear quota on a file
      args[0] = "-clrQuota";
      runCommand(admin, args, true);
      runCommand(admin, true, "-clrSpaceQuota", args[1]);
     
      // 15b: clear quota on a non-existent directory
      args[1] = nonExistentPath.toString();
      runCommand(admin, args, true);
      runCommand(admin, true, "-clrSpaceQuota", args[1]);
     
      // 16a: set the quota of /test to be 0
      args = new String[]{"-setQuota", "0", parent.toString()};
      runCommand(admin, args, true);
      runCommand(admin, true, "-setSpaceQuota", "0", args[2]);
     
      // 16b: set the quota of /test to be -1
      args[1] = "-1";
      runCommand(admin, args, true);
      runCommand(admin, true, "-setSpaceQuota", args[1], args[2]);
     
      // 16c: set the quota of /test to be Long.MAX_VALUE+1
      args[1] = String.valueOf(Long.MAX_VALUE+1L);
      runCommand(admin, args, true);
      runCommand(admin, true, "-setSpaceQuota", args[1], args[2]);
     
      // 16d: set the quota of /test to be a non integer
      args[1] = "33aa1.5";
      runCommand(admin, args, true);
      runCommand(admin, true, "-setSpaceQuota", args[1], args[2]);
     
      // 16e: set space quota with a value larger than Long.MAX_VALUE
      runCommand(admin, true, "-setSpaceQuota",
                 (Long.MAX_VALUE/1024/1024 + 1024) + "m", args[2]);
     
      // 17:  setQuota by a non-administrator
      final String username = "userxx";
      UserGroupInformation ugi =
        UserGroupInformation.createUserForTesting(username,
                                                  new String[]{"groupyy"});
     
      final String[] args2 = args.clone(); // need final ref for doAs block
      ugi.doAs(new PrivilegedExceptionAction<Object>() {
        @Override
        public Object run() throws Exception {
          assertEquals("Not running as new user", username,
              UserGroupInformation.getCurrentUser().getShortUserName());
          DFSAdmin userAdmin = new DFSAdmin(conf);
         
          args2[1] = "100";
          runCommand(userAdmin, args2, true);
          runCommand(userAdmin, true, "-setSpaceQuota", "1g", args2[2]);
         

Examples of org.apache.hadoop.hdfs.tools.DFSAdmin

    conf.setBoolean(DFSConfigKeys.DFS_WEBHDFS_ENABLED_KEY, true);
    MiniDFSCluster cluster =
      new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
    cluster.waitActive();
    FileSystem fs = cluster.getFileSystem();
    DFSAdmin admin = new DFSAdmin(conf);

    final String nnAddr = conf.get(DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY);
    final String webhdfsuri = WebHdfsFileSystem.SCHEME  + "://" + nnAddr;
    System.out.println("webhdfsuri=" + webhdfsuri);
    final FileSystem webhdfs = new Path(webhdfsuri).getFileSystem(conf);
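
The excerpt stops right after constructing the WebHDFS FileSystem. One plausible continuation, sketched here purely as an illustration, is to apply a space quota with DFSAdmin and read it back through the webhdfs handle; getContentSummary is part of the FileSystem API that WebHdfsFileSystem implements:

      // Hypothetical continuation: set a quota with DFSAdmin, verify via WebHDFS
      final Path dir = new Path("/test");
      assertTrue(fs.mkdirs(dir));
      admin.run(new String[] {"-setSpaceQuota", "1m", dir.toString()});
      final ContentSummary cs = webhdfs.getContentSummary(dir);
      assertEquals(1L << 20, cs.getSpaceQuota());   // "1m" is parsed as 1 MiB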

Examples of org.apache.hadoop.hdfs.tools.DFSAdmin

      cluster.waitActive();
      fs = (DistributedFileSystem)(cluster.getFileSystem());
      fc = FileContext.getFileContext(cluster.getURI(0));

      // Saving image without safe mode should fail
      DFSAdmin admin = new DFSAdmin(conf);
      String[] args = new String[]{"-saveNamespace"};
      try {
        admin.run(args);
      } catch(IOException eIO) {
        assertTrue(eIO.getLocalizedMessage().contains("Safe mode should be turned ON"));
      } catch(Exception e) {
        throw new IOException(e);
      }
      // create new file
      Path file = new Path("namespace.dat");
      writeFile(fs, file, replication);
      checkFile(fs, file, replication);

      // create new link
      Path symlink = new Path("file.link");
      fc.createSymlink(file, symlink, false);
      assertTrue(fc.getFileLinkStatus(symlink).isSymlink());

      // verify that the edits file is NOT empty
      Collection<URI> editsDirs = cluster.getNameEditsDirs(0);
      for(URI uri : editsDirs) {
        File ed = new File(uri.getPath());
        assertTrue(new File(ed, "current/"
                            + NNStorage.getInProgressEditsFileName(1))
                   .length() > Integer.SIZE/Byte.SIZE);
      }

      // Saving image in safe mode should succeed
      fs.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
      try {
        admin.run(args);
      } catch(Exception e) {
        throw new IOException(e);
      }
     
      final int EXPECTED_TXNS_FIRST_SEG = 12;
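
The snippet above shows that -saveNamespace is refused while the NameNode is out of safe mode and accepted once safe mode is entered. The same sequence can be driven entirely through the DFSAdmin command interface, reusing the admin instance from the snippet (all flags are standard dfsadmin options):

      admin.run(new String[] {"-safemode", "enter"});   // enter safe mode
      admin.run(new String[] {"-saveNamespace"});       // accepted in safe mode
      admin.run(new String[] {"-safemode", "leave"});   // resume normal operation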

Examples of org.apache.hadoop.hdfs.tools.DFSAdmin

   * @throws Exception
   */
  @Test (timeout = 30000)
  public void testInvalidShell() throws Exception {
    Configuration conf = new Configuration(); // default FS (non-DFS)
    DFSAdmin admin = new DFSAdmin();
    admin.setConf(conf);
    int res = admin.run(new String[] {"-refreshNodes"});
    assertEquals("expected to fail -1", res , -1);
  }

Examples of org.apache.hadoop.hdfs.tools.DFSAdmin

      cluster.waitActive();
      fs = (cluster.getFileSystem());
      fc = FileContext.getFileContext(cluster.getURI(0));

      // Saving image without safe mode should fail
      DFSAdmin admin = new DFSAdmin(conf);
      String[] args = new String[]{"-saveNamespace"};
      try {
        admin.run(args);
      } catch(IOException eIO) {
        assertTrue(eIO.getLocalizedMessage().contains("Safe mode should be turned ON"));
      } catch(Exception e) {
        throw new IOException(e);
      }
      // create new file
      Path file = new Path("namespace.dat");
      writeFile(fs, file, replication);
      checkFile(fs, file, replication);

      // create new link
      Path symlink = new Path("file.link");
      fc.createSymlink(file, symlink, false);
      assertTrue(fc.getFileLinkStatus(symlink).isSymlink());

      // verify that the edits file is NOT empty
      Collection<URI> editsDirs = cluster.getNameEditsDirs(0);
      for(URI uri : editsDirs) {
        File ed = new File(uri.getPath());
        assertTrue(new File(ed, "current/"
                            + NNStorage.getInProgressEditsFileName(1))
                   .length() > Integer.SIZE/Byte.SIZE);
      }

      // Saving image in safe mode should succeed
      fs.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
      try {
        admin.run(args);
      } catch(Exception e) {
        throw new IOException(e);
      }
     
      // TODO: Fix the test to not require a hard-coded transaction count.