Examples of DFSTestUtil


Examples of org.apache.hadoop.hdfs.DFSTestUtil
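DFSTestUtil is the HDFS test helper that generates a tree of pseudo-random files on a running MiniDFSCluster, waits for them to replicate, and deletes them afterwards. Every snippet on this page follows the same lifecycle; the sketch below condenses it, assuming a Configuration named conf is in scope (the parameter values are illustrative, taken from the snippets rather than prescribed):

  // Minimal DFSTestUtil lifecycle (sketch; values are illustrative).
  DFSTestUtil util = new DFSTestUtil.Builder()
      .setName("Example")        // label that scopes the generated files
      .setNumFiles(4)            // number of files to generate
      .setMaxLevels(3)           // maximum directory depth
      .setMaxSize(8 * 1024)      // maximum file size in bytes
      .build();
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(4).build();
  FileSystem fs = cluster.getFileSystem();
  util.createFiles(fs, "/srcdat");                 // populate the cluster
  util.waitReplication(fs, "/srcdat", (short) 3);  // block until fully replicated
  // ... exercise the cluster under test ...
  util.cleanup(fs, "/srcdat");                     // delete the generated tree
  fs.close();
  cluster.shutdown();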


  /** Run fsck on a healthy filesystem, then on one whose datanodes are down. */
  @Test
  public void testFsck() throws Exception {
    DFSTestUtil util = new DFSTestUtil.Builder().setName("TestFsck").
        setNumFiles(20).build();
    MiniDFSCluster cluster = null;
    FileSystem fs = null;
    try {
      Configuration conf = new HdfsConfiguration();
      final long precision = 1L;
      conf.setLong(DFSConfigKeys.DFS_NAMENODE_ACCESSTIME_PRECISION_KEY, precision);
      conf.setLong(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY, 10000L);
      cluster = new MiniDFSCluster.Builder(conf).numDataNodes(4).build();
      fs = cluster.getFileSystem();
      final String fileName = "/srcdat";
      util.createFiles(fs, fileName);
      util.waitReplication(fs, fileName, (short)3);
      final Path file = new Path(fileName);
      long aTime = fs.getFileStatus(file).getAccessTime();
      Thread.sleep(precision);
      setupAuditLogs();
      String outStr = runFsck(conf, 0, true, "/");
      verifyAuditLogs();
      assertEquals(aTime, fs.getFileStatus(file).getAccessTime());
      System.out.println(outStr);
      assertTrue(outStr.contains(NamenodeFsck.HEALTHY_STATUS));
      if (fs != null) {try{fs.close();} catch(Exception e){}}
      cluster.shutdown();
     
      // restart the cluster; bring up namenode but not the data nodes
      cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).format(false).build();
      outStr = runFsck(conf, 1, true, "/");
      // expect the fsck result to be CORRUPT
      assertTrue(outStr.contains(NamenodeFsck.CORRUPT_STATUS));
      System.out.println(outStr);
     
      // bring up data nodes & cleanup cluster
      cluster.startDataNodes(conf, 4, true, null, null);
      cluster.waitActive();
      cluster.waitClusterUp();
      fs = cluster.getFileSystem();
      util.cleanup(fs, "/srcdat");
    } finally {
      if (fs != null) {try{fs.close();} catch(Exception e){}}
      if (cluster != null) { cluster.shutdown(); }
    }
  }
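
Every example on this page calls a runFsck(...) helper that the excerpts never show in full; only its last line (return bStream.toString();) surfaces in a later fragment. The following is a reconstruction from the call sites, offered as an assumption rather than the verbatim helper: it runs the DFSck tool against the given paths, captures its report, and optionally asserts on the exit code.

  // Assumed reconstruction of the runFsck helper (not shown in the excerpts).
  // Assumed imports: java.io.ByteArrayOutputStream, java.io.PrintStream,
  // org.apache.hadoop.hdfs.tools.DFSck, org.apache.hadoop.util.ToolRunner.
  static String runFsck(Configuration conf, int expectedErrCode,
      boolean checkErrorCode, String... path) throws Exception {
    // Capture everything DFSck prints so callers can grep the report.
    ByteArrayOutputStream bStream = new ByteArrayOutputStream();
    PrintStream out = new PrintStream(bStream, true);
    // DFSck is the Tool behind 'hdfs fsck'; ToolRunner handles generic options.
    int errCode = ToolRunner.run(new DFSck(conf, out), path);
    if (checkErrorCode) {
      assertEquals(expectedErrCode, errCode);
    }
    return bStream.toString();
  }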

Examples of org.apache.hadoop.hdfs.DFSTestUtil

  @Test
  public void testFsckNonExistent() throws Exception {
    DFSTestUtil util = new DFSTestUtil.Builder().setName("TestFsck").
        setNumFiles(20).build();
    MiniDFSCluster cluster = null;
    FileSystem fs = null;
    try {
      Configuration conf = new HdfsConfiguration();
      conf.setLong(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY, 10000L);
      cluster = new MiniDFSCluster.Builder(conf).numDataNodes(4).build();
      fs = cluster.getFileSystem();
      util.createFiles(fs, "/srcdat");
      util.waitReplication(fs, "/srcdat", (short)3);
      String outStr = runFsck(conf, 0, true, "/non-existent");
      assertEquals(-1, outStr.indexOf(NamenodeFsck.HEALTHY_STATUS));
      System.out.println(outStr);
      util.cleanup(fs, "/srcdat");
    } finally {
      if (fs != null) {try{fs.close();} catch(Exception e){}}
      if (cluster != null) { cluster.shutdown(); }
    }
  }

Examples of org.apache.hadoop.hdfs.DFSTestUtil


  /** Test fsck with permission set on inodes */
  @Test
  public void testFsckPermission() throws Exception {
    final DFSTestUtil util = new DFSTestUtil.Builder().
        setName(getClass().getSimpleName()).setNumFiles(20).build();
    final Configuration conf = new HdfsConfiguration();
    conf.setLong(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY, 10000L);

    MiniDFSCluster cluster = null;
    try {
      // Create a cluster with the current user, write some files
      cluster = new MiniDFSCluster.Builder(conf).numDataNodes(4).build();
      final MiniDFSCluster c2 = cluster;
      final String dir = "/dfsck";
      final Path dirpath = new Path(dir);
      final FileSystem fs = c2.getFileSystem();

      util.createFiles(fs, dir);
      util.waitReplication(fs, dir, (short) 3);
      fs.setPermission(dirpath, new FsPermission((short) 0700));

      // run DFSck as another user, should fail with permission issue
      UserGroupInformation fakeUGI = UserGroupInformation.createUserForTesting(
          "ProbablyNotARealUserName", new String[] { "ShangriLa" });
      fakeUGI.doAs(new PrivilegedExceptionAction<Object>() {
        @Override
        public Object run() throws Exception {
          System.out.println(runFsck(conf, -1, true, dir));
          return null;
        }
      });
     
      // set permission and try DFSck again as the fake user, should succeed
      fs.setPermission(dirpath, new FsPermission((short) 0777));
      fakeUGI.doAs(new PrivilegedExceptionAction<Object>() {
        @Override
        public Object run() throws Exception {
          final String outStr = runFsck(conf, 0, true, dir);
          System.out.println(outStr);
          assertTrue(outStr.contains(NamenodeFsck.HEALTHY_STATUS));
          return null;
        }
      });

      util.cleanup(fs, dir);
    } finally {
      if (cluster != null) { cluster.shutdown(); }
    }
  }
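
A note on the identity switch above: UserGroupInformation.createUserForTesting registers an in-memory user with the given name and groups, so no such account needs to exist on the host. The doAs blocks then run fsck under that identity, which is why the directory is unreadable at mode 0700 and readable again once widened to 0777.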

Examples of org.apache.hadoop.hdfs.DFSTestUtil

    final int DFS_BLOCK_SIZE = 1024;
    final int NUM_DATANODES = 4;
    conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, DFS_BLOCK_SIZE);
    conf.setLong(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY, 10000L);
    conf.setInt(DFSConfigKeys.DFS_DATANODE_DIRECTORYSCAN_INTERVAL_KEY, 1);
    DFSTestUtil util = new DFSTestUtil("TestFsck", 5, 3,
        (5 * DFS_BLOCK_SIZE) + (DFS_BLOCK_SIZE - 1), 5 * DFS_BLOCK_SIZE);
    MiniDFSCluster cluster = null;
    FileSystem fs = null;
    try {
      cluster = new MiniDFSCluster.Builder(conf).
          numDataNodes(NUM_DATANODES).build();
      String topDir = "/srcdat";
      fs = cluster.getFileSystem();
      cluster.waitActive();
      util.createFiles(fs, topDir);
      util.waitReplication(fs, topDir, (short)3);
      String outStr = runFsck(conf, 0, true, "/");
      assertTrue(outStr.contains(NamenodeFsck.HEALTHY_STATUS));
      DFSClient dfsClient = new DFSClient(new InetSocketAddress("localhost",
                                          cluster.getNameNodePort()), conf);
      String fileNames[] = util.getFileNames(topDir);
      CorruptedTestFile ctFiles[] = new CorruptedTestFile[] {
        new CorruptedTestFile(fileNames[0], Sets.newHashSet(0),
          dfsClient, NUM_DATANODES, DFS_BLOCK_SIZE),
        new CorruptedTestFile(fileNames[1], Sets.newHashSet(2, 3),
          dfsClient, NUM_DATANODES, DFS_BLOCK_SIZE),
        new CorruptedTestFile(fileNames[2], Sets.newHashSet(4),
          dfsClient, NUM_DATANODES, DFS_BLOCK_SIZE),
        new CorruptedTestFile(fileNames[3], Sets.newHashSet(0, 1, 2, 3),
          dfsClient, NUM_DATANODES, DFS_BLOCK_SIZE),
        new CorruptedTestFile(fileNames[4], Sets.newHashSet(1, 2, 3, 4),
          dfsClient, NUM_DATANODES, DFS_BLOCK_SIZE)
      };
      int totalMissingBlocks = 0;
      for (CorruptedTestFile ctFile : ctFiles) {
        totalMissingBlocks += ctFile.getTotalMissingBlocks();
      }
      for (CorruptedTestFile ctFile : ctFiles) {
        ctFile.removeBlocks();
      }
      // Wait for fsck to discover all the missing blocks
      while (true) {
        outStr = runFsck(conf, 1, false, "/");
        String numCorrupt = null;
        for (String line : outStr.split(LINE_SEPARATOR)) {
          Matcher m = numCorruptBlocksPattern.matcher(line);
          if (m.matches()) {
            numCorrupt = m.group(1);
            break;
          }
        }
        if (numCorrupt == null) {
          throw new IOException("failed to find number of corrupt " +
              "blocks in fsck output.");
        }
        if (numCorrupt.equals(Integer.toString(totalMissingBlocks))) {
          assertTrue(outStr.contains(NamenodeFsck.CORRUPT_STATUS));
          break;
        }
        try {
          Thread.sleep(100);
        } catch (InterruptedException ignore) {
        }
      }

      // Copy the remaining (non-corrupt) blocks of the corrupted files to lost+found.
      outStr = runFsck(conf, 1, false, "/", "-move");
      assertTrue(outStr.contains(NamenodeFsck.CORRUPT_STATUS));

      // Make sure that we properly copied the block files from the DataNodes
      // to lost+found
      for (CorruptedTestFile ctFile : ctFiles) {
        ctFile.checkSalvagedRemains();
      }

      // Fix the filesystem by deleting the corrupted files
      outStr = runFsck(conf, 1, true, "/", "-delete");
      assertTrue(outStr.contains(NamenodeFsck.CORRUPT_STATUS));
     
      // Check to make sure we have a healthy filesystem
      outStr = runFsck(conf, 0, true, "/");
      assertTrue(outStr.contains(NamenodeFsck.HEALTHY_STATUS));
      util.cleanup(fs, topDir);
    } finally {
      if (fs != null) {try{fs.close();} catch(Exception e){}}
      if (cluster != null) { cluster.shutdown(); }
    }
  }
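
The polling loop above parses the fsck report with two class-level helpers the excerpt omits. Plausible definitions, assumed from the report line they must match ("Corrupt blocks:" followed by tabs and a count) rather than copied from the original class:

  // Assumed helpers (omitted from the excerpt above).
  static final String LINE_SEPARATOR = System.getProperty("line.separator");
  static final java.util.regex.Pattern numCorruptBlocksPattern =
      java.util.regex.Pattern.compile(".*Corrupt blocks:\t\t([0-9]*).*");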

Examples of org.apache.hadoop.hdfs.DFSTestUtil

  @Test
  public void testFsckMoveAndDelete() throws Exception {
    final int MAX_MOVE_TRIES = 5;
    DFSTestUtil util = new DFSTestUtil.Builder().
        setName("TestFsckMoveAndDelete").setNumFiles(5).build();
    MiniDFSCluster cluster = null;
    FileSystem fs = null;
    try {
      Configuration conf = new HdfsConfiguration();
      conf.setLong(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY, 10000L);
      conf.setInt(DFSConfigKeys.DFS_DATANODE_DIRECTORYSCAN_INTERVAL_KEY, 1);
      cluster = new MiniDFSCluster.Builder(conf).numDataNodes(4).build();
      String topDir = "/srcdat";
      fs = cluster.getFileSystem();
      cluster.waitActive();
      util.createFiles(fs, topDir);
      util.waitReplication(fs, topDir, (short)3);
      String outStr = runFsck(conf, 0, true, "/");
      assertTrue(outStr.contains(NamenodeFsck.HEALTHY_STATUS));
     
      // Corrupt a block by deleting it
      String[] fileNames = util.getFileNames(topDir);
      DFSClient dfsClient = new DFSClient(new InetSocketAddress("localhost",
                                          cluster.getNameNodePort()), conf);
      String corruptFileName = fileNames[0];
      ExtendedBlock block = dfsClient.getNamenode().getBlockLocations(
          corruptFileName, 0, Long.MAX_VALUE).get(0).getBlock();
      for (int i=0; i<4; i++) {
        File blockFile = MiniDFSCluster.getBlockFile(i, block);
        if(blockFile != null && blockFile.exists()) {
          assertTrue(blockFile.delete());
        }
      }

      // We expect the filesystem to be corrupted
      outStr = runFsck(conf, 1, false, "/");
      while (!outStr.contains(NamenodeFsck.CORRUPT_STATUS)) {
        try {
          Thread.sleep(100);
        } catch (InterruptedException ignore) {
        }
        outStr = runFsck(conf, 1, false, "/");
      }
     
      // After a fsck -move, the corrupted file should still exist.
      for (int i = 0; i < MAX_MOVE_TRIES; i++) {
        outStr = runFsck(conf, 1, true, "/", "-move" );
        assertTrue(outStr.contains(NamenodeFsck.CORRUPT_STATUS));
        String[] newFileNames = util.getFileNames(topDir);
        boolean found = false;
        for (String f : newFileNames) {
          if (f.equals(corruptFileName)) {
            found = true;
            break;
          }
        }
        assertTrue(found);
      }

      // Fix the filesystem by moving corrupted files to lost+found
      outStr = runFsck(conf, 1, true, "/", "-move", "-delete");
      assertTrue(outStr.contains(NamenodeFsck.CORRUPT_STATUS));
     
      // Check to make sure we have a healthy filesystem
      outStr = runFsck(conf, 0, true, "/");
      assertTrue(outStr.contains(NamenodeFsck.HEALTHY_STATUS));
      util.cleanup(fs, topDir);
      if (fs != null) {try{fs.close();} catch(Exception e){}}
      cluster.shutdown();
    } finally {
      if (fs != null) {try{fs.close();} catch(Exception e){}}
      if (cluster != null) { cluster.shutdown(); }
    }
  }

Examples of org.apache.hadoop.hdfs.DFSTestUtil

 
  @Test
  public void testFsckOpenFiles() throws Exception {
    DFSTestUtil util = new DFSTestUtil.Builder().setName("TestFsck").
        setNumFiles(4).build();
    MiniDFSCluster cluster = null;
    FileSystem fs = null;
    try {
      Configuration conf = new HdfsConfiguration();
      conf.setLong(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY, 10000L);
      cluster = new MiniDFSCluster.Builder(conf).numDataNodes(4).build();
      String topDir = "/srcdat";
      String randomString = "HADOOP  ";
      fs = cluster.getFileSystem();
      cluster.waitActive();
      util.createFiles(fs, topDir);
      util.waitReplication(fs, topDir, (short)3);
      String outStr = runFsck(conf, 0, true, "/");
      assertTrue(outStr.contains(NamenodeFsck.HEALTHY_STATUS));
      // Open a file for writing and do not close for now
      Path openFile = new Path(topDir + "/openFile");
      FSDataOutputStream out = fs.create(openFile);
      int writeCount = 0;
      while (writeCount != 100) {
        out.write(randomString.getBytes());
        writeCount++;                 
      }
      // We expect fsck to report HEALTHY; without -openforwrite the open file is not listed
      outStr = runFsck(conf, 0, true, topDir);
      System.out.println(outStr);
      assertTrue(outStr.contains(NamenodeFsck.HEALTHY_STATUS));
      assertFalse(outStr.contains("OPENFORWRITE"));
      // Use -openforwrite option to list open files
      outStr = runFsck(conf, 0, true, topDir, "-openforwrite");
      System.out.println(outStr);
      assertTrue(outStr.contains("OPENFORWRITE"));
      assertTrue(outStr.contains("openFile"));
      // Close the file
      out.close();
      // Now, fsck should show HEALTHY fs and should not show any open files
      outStr = runFsck(conf, 0, true, topDir);
      System.out.println(outStr);
      assertTrue(outStr.contains(NamenodeFsck.HEALTHY_STATUS));
      assertFalse(outStr.contains("OPENFORWRITE"));
      util.cleanup(fs, topDir);
      if (fs != null) {try{fs.close();} catch(Exception e){}}
      cluster.shutdown();
    } finally {
      if (fs != null) {try{fs.close();} catch(Exception e){}}
      if (cluster != null) { cluster.shutdown(); }
    }
  }
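
The contrast in this test is the point: a file with an active lease is not flagged by a plain fsck, which still reports HEALTHY, and only the -openforwrite option makes the report label it OPENFORWRITE. Once the stream is closed, the file is reported like any other.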

Examples of org.apache.hadoop.hdfs.DFSTestUtil

    MiniDFSCluster cluster = null;
    try {
      cluster = new MiniDFSCluster.Builder(conf).build();
      cluster.waitActive();
      fs = cluster.getFileSystem();
      DFSTestUtil util = new DFSTestUtil.Builder().
          setName("testGetCorruptFiles").setNumFiles(3).setMaxLevels(1).
          setMaxSize(1024).build();
      util.createFiles(fs, "/corruptData", (short) 1);
      util.waitReplication(fs, "/corruptData", (short) 1);

      // String outStr = runFsck(conf, 0, true, "/corruptData", "-list-corruptfileblocks");
      String outStr = runFsck(conf, 0, false, "/corruptData", "-list-corruptfileblocks");
      System.out.println("1. good fsck out: " + outStr);
      assertTrue(outStr.contains("has 0 CORRUPT files"));
      // delete the blocks
      final String bpid = cluster.getNamesystem().getBlockPoolId();
      for (int i=0; i<4; i++) {
        for (int j=0; j<=1; j++) {
          File storageDir = cluster.getInstanceStorageDir(i, j);
          File data_dir = MiniDFSCluster.getFinalizedDir(storageDir, bpid);
          File[] blocks = data_dir.listFiles();
          if (blocks == null)
            continue;
 
          for (int idx = 0; idx < blocks.length; idx++) {
            if (!blocks[idx].getName().startsWith("blk_")) {
              continue;
            }
            assertTrue("Cannot remove file.", blocks[idx].delete());
          }
        }
      }

      // wait for the namenode to see the corruption
      final NamenodeProtocols namenode = cluster.getNameNodeRpc();
      CorruptFileBlocks corruptFileBlocks = namenode
          .listCorruptFileBlocks("/corruptData", null);
      int numCorrupt = corruptFileBlocks.getFiles().length;
      while (numCorrupt == 0) {
        Thread.sleep(1000);
        corruptFileBlocks = namenode
            .listCorruptFileBlocks("/corruptData", null);
        numCorrupt = corruptFileBlocks.getFiles().length;
      }
      outStr = runFsck(conf, -1, true, "/corruptData", "-list-corruptfileblocks");
      System.out.println("2. bad fsck out: " + outStr);
      assertTrue(outStr.contains("has 3 CORRUPT files"));

      // Do a listing on a dir which doesn't have any corrupt blocks and validate
      util.createFiles(fs, "/goodData");
      outStr = runFsck(conf, 0, true, "/goodData", "-list-corruptfileblocks");
      System.out.println("3. good fsck out: " + outStr);
      assertTrue(outStr.contains("has 0 CORRUPT files"));
      util.cleanup(fs,"/corruptData");
      util.cleanup(fs, "/goodData");
    } finally {
      if (cluster != null) {cluster.shutdown();}
    }
  }
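
The retry loop above, like the ones in the earlier corrupt-block tests, exists because the blocks are deleted behind the NameNode's back: it only learns of the missing replicas once the DataNodes re-scan their storage and report, which is why those tests set DFS_DATANODE_DIRECTORYSCAN_INTERVAL_KEY to 1 (seconds) and keep the block-report interval short.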

Examples of org.apache.hadoop.hdfs.DFSTestUtil


  /** Test fsck with symlinks in the filesystem */
  @Test
  public void testFsckSymlink() throws Exception {
    final DFSTestUtil util = new DFSTestUtil.Builder().
        setName(getClass().getSimpleName()).setNumFiles(1).build();
    final Configuration conf = new HdfsConfiguration();
    conf.setLong(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY, 10000L);

    MiniDFSCluster cluster = null;
    FileSystem fs = null;
    try {
      final long precision = 1L;
      conf.setLong(DFSConfigKeys.DFS_NAMENODE_ACCESSTIME_PRECISION_KEY, precision);
      conf.setLong(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY, 10000L);
      cluster = new MiniDFSCluster.Builder(conf).numDataNodes(4).build();
      fs = cluster.getFileSystem();
      final String fileName = "/srcdat";
      util.createFiles(fs, fileName);
      final FileContext fc = FileContext.getFileContext(
          cluster.getConfiguration(0));
      final Path file = new Path(fileName);
      final Path symlink = new Path("/srcdat-symlink");
      fc.createSymlink(file, symlink, false);
      util.waitReplication(fs, fileName, (short)3);
      long aTime = fc.getFileStatus(symlink).getAccessTime();
      Thread.sleep(precision);
      setupAuditLogs();
      String outStr = runFsck(conf, 0, true, "/");
      verifyAuditLogs();
      assertEquals(aTime, fc.getFileStatus(symlink).getAccessTime());
      System.out.println(outStr);
      assertTrue(outStr.contains(NamenodeFsck.HEALTHY_STATUS));
      assertTrue(outStr.contains("Total symlinks:\t\t1"));
      util.cleanup(fs, fileName);
    } finally {
      if (fs != null) {try{fs.close();} catch(Exception e){}}
      if (cluster != null) { cluster.shutdown(); }
    }
  }
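
The symlink is created through FileContext rather than FileSystem because HDFS exposed symlink support through the FileContext API; the assertions then check both that fsck leaves access times untouched and that the report counts the link ("Total symlinks: 1").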

Examples of org.apache.hadoop.hdfs.DFSTestUtil

    return bStream.toString();
  }

  /** Run fsck on a healthy filesystem, then on one whose datanodes are down. */
  public void testFsck() throws Exception {
    DFSTestUtil util = new DFSTestUtil("TestFsck", 20, 3, 8*1024);
    MiniDFSCluster cluster = null;
    FileSystem fs = null;
    try {
      Configuration conf = new Configuration();
      conf.setLong("dfs.blockreport.intervalMsec", 10000L);
      cluster = new MiniDFSCluster(conf, 4, true, null);
      fs = cluster.getFileSystem();
      util.createFiles(fs, "/srcdat");
      util.waitReplication(fs, "/srcdat", (short)3);
      String outStr = runFsck(conf, 0, true, "/");
      assertTrue(outStr.contains(NamenodeFsck.HEALTHY_STATUS));
      System.out.println(outStr);
      if (fs != null) {try{fs.close();} catch(Exception e){}}
      cluster.shutdown();
     
      // restart the cluster; bring up namenode but not the data nodes
      cluster = new MiniDFSCluster(conf, 0, false, null);
      outStr = runFsck(conf, 1, true, "/");
      // expect the fsck result to be CORRUPT
      assertTrue(outStr.contains(NamenodeFsck.CORRUPT_STATUS));
      System.out.println(outStr);
     
      // bring up data nodes & cleanup cluster
      cluster.startDataNodes(conf, 4, true, null, null);
      cluster.waitActive();
      cluster.waitClusterUp();
      fs = cluster.getFileSystem();
      util.cleanup(fs, "/srcdat");
    } finally {
      if (fs != null) {try{fs.close();} catch(Exception e){}}
      if (cluster != null) { cluster.shutdown(); }
    }
  }
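
These final two snippets come from an older branch of the same test suite: DFSTestUtil and MiniDFSCluster are built with positional constructors instead of builders, and the block-report interval is set via the literal key "dfs.blockreport.intervalMsec" rather than the DFSConfigKeys constant. Judging from the builder-based snippets above, the DFSTestUtil arguments appear to map to (name, file count, directory depth, max file size) and the MiniDFSCluster arguments to (conf, datanodes, format, racks); treat that mapping as an inference from the call sites, not documentation.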

Examples of org.apache.hadoop.hdfs.DFSTestUtil


  public void testFsckNonExistent() throws Exception {
    DFSTestUtil util = new DFSTestUtil("TestFsck", 20, 3, 8*1024);
    MiniDFSCluster cluster = null;
    FileSystem fs = null;
    try {
      Configuration conf = new Configuration();
      conf.setLong("dfs.blockreport.intervalMsec", 10000L);
      cluster = new MiniDFSCluster(conf, 4, true, null);
      fs = cluster.getFileSystem();
      util.createFiles(fs, "/srcdat");
      util.waitReplication(fs, "/srcdat", (short)3);
      String outStr = runFsck(conf, 0, true, "/non-existent");
      assertEquals(-1, outStr.indexOf(NamenodeFsck.HEALTHY_STATUS));
      System.out.println(outStr);
      util.cleanup(fs, "/srcdat");
    } finally {
      if (fs != null) {try{fs.close();} catch(Exception e){}}
      if (cluster != null) { cluster.shutdown(); }
    }
  }