Examples of DFSTestUtil


Examples of org.apache.hadoop.hdfs.DFSTestUtil
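
All of the examples below share the same DFSTestUtil workflow: build a utility instance, generate a tree of test files, wait for the target replication, verify the file contents, and clean up. A minimal sketch of that shared pattern, assuming a running MiniDFSCluster (the cluster variable and directory names here are placeholders, not taken from any one example):

  DFSTestUtil util = new DFSTestUtil.Builder()
      .setName("ExampleUsage")      // prefix used for the generated file names
      .setNumFiles(4)               // how many files to create
      .setMaxLevels(2)              // maximum directory depth
      .setMaxSize(1024)             // maximum file size in bytes
      .build();
  FileSystem fs = cluster.getFileSystem();
  util.createFiles(fs, "/exampleData");                // write the files
  util.waitReplication(fs, "/exampleData", (short) 3); // block until replicated
  util.checkFiles(fs, "/exampleData");                 // verify round-trip contents
  util.cleanup(fs, "/exampleData");                    // delete the tree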

  @Test
  public void testFsckMoveAndDelete() throws Exception {
    final int MAX_MOVE_TRIES = 5;
    DFSTestUtil util = new DFSTestUtil.Builder().
        setName("TestFsckMoveAndDelete").setNumFiles(5).build();
    MiniDFSCluster cluster = null;
    FileSystem fs = null;
    try {
      Configuration conf = new HdfsConfiguration();
      conf.setLong(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY, 10000L);
      conf.setInt(DFSConfigKeys.DFS_DATANODE_DIRECTORYSCAN_INTERVAL_KEY, 1);
      cluster = new MiniDFSCluster.Builder(conf).numDataNodes(4).build();
      String topDir = "/srcdat";
      fs = cluster.getFileSystem();
      cluster.waitActive();
      util.createFiles(fs, topDir);
      util.waitReplication(fs, topDir, (short)3);
      String outStr = runFsck(conf, 0, true, "/");
      assertTrue(outStr.contains(NamenodeFsck.HEALTHY_STATUS));
     
      // Corrupt a block by deleting it
      String[] fileNames = util.getFileNames(topDir);
      DFSClient dfsClient = new DFSClient(new InetSocketAddress("localhost",
                                          cluster.getNameNodePort()), conf);
      String corruptFileName = fileNames[0];
      ExtendedBlock block = dfsClient.getNamenode().getBlockLocations(
          corruptFileName, 0, Long.MAX_VALUE).get(0).getBlock();
      for (int i=0; i<4; i++) {
        File blockFile = MiniDFSCluster.getBlockFile(i, block);
        if(blockFile != null && blockFile.exists()) {
          assertTrue(blockFile.delete());
        }
      }

      // We expect the filesystem to be corrupted
      outStr = runFsck(conf, 1, false, "/");
      while (!outStr.contains(NamenodeFsck.CORRUPT_STATUS)) {
        try {
          Thread.sleep(100);
        } catch (InterruptedException ignore) {
        }
        outStr = runFsck(conf, 1, false, "/");
      }
     
      // After a fsck -move, the corrupted file should still exist.
      for (int i = 0; i < MAX_MOVE_TRIES; i++) {
        outStr = runFsck(conf, 1, true, "/", "-move" );
        assertTrue(outStr.contains(NamenodeFsck.CORRUPT_STATUS));
        String[] newFileNames = util.getFileNames(topDir);
        boolean found = false;
        for (String f : newFileNames) {
          if (f.equals(corruptFileName)) {
            found = true;
            break;
          }
        }
        assertTrue(found);
      }

      // Fix the filesystem by running fsck with -move and -delete on the corrupted files
      outStr = runFsck(conf, 1, true, "/", "-move", "-delete");
      assertTrue(outStr.contains(NamenodeFsck.CORRUPT_STATUS));
     
      // Check to make sure we have healthy filesystem
      outStr = runFsck(conf, 0, true, "/");
      assertTrue(outStr.contains(NamenodeFsck.HEALTHY_STATUS));
      util.cleanup(fs, topDir);
    } finally {
      if (fs != null) {try{fs.close();} catch(Exception e){}}
      if (cluster != null) { cluster.shutdown(); }
    }
  }
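
Every test on this page calls a runFsck helper that the snippets themselves do not include. A minimal sketch consistent with its call sites, offered as an assumption about the surrounding test class: it runs the DFSck tool (org.apache.hadoop.hdfs.tools.DFSck) through ToolRunner, optionally asserts on the exit code, and returns the captured output.

  // Assumed helper (not shown on this page): run fsck and capture its output.
  static String runFsck(Configuration conf, int expectedErrCode,
                        boolean checkErrorCode, String... path) throws Exception {
    ByteArrayOutputStream bStream = new ByteArrayOutputStream();
    PrintStream out = new PrintStream(bStream, true);
    int errCode = ToolRunner.run(new DFSck(conf, out), path);
    if (checkErrorCode) {
      assertEquals(expectedErrCode, errCode);
    }
    return bStream.toString();
  }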

Examples of org.apache.hadoop.hdfs.DFSTestUtil

 
  @Test
  public void testFsckOpenFiles() throws Exception {
    DFSTestUtil util = new DFSTestUtil.Builder().setName("TestFsck").
        setNumFiles(4).build();
    MiniDFSCluster cluster = null;
    FileSystem fs = null;
    try {
      Configuration conf = new HdfsConfiguration();
      conf.setLong(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY, 10000L);
      cluster = new MiniDFSCluster.Builder(conf).numDataNodes(4).build();
      String topDir = "/srcdat";
      String randomString = "HADOOP  ";
      fs = cluster.getFileSystem();
      cluster.waitActive();
      util.createFiles(fs, topDir);
      util.waitReplication(fs, topDir, (short)3);
      String outStr = runFsck(conf, 0, true, "/");
      assertTrue(outStr.contains(NamenodeFsck.HEALTHY_STATUS));
      // Open a file for writing and do not close for now
      Path openFile = new Path(topDir + "/openFile");
      FSDataOutputStream out = fs.create(openFile);
      int writeCount = 0;
      while (writeCount != 100) {
        out.write(randomString.getBytes());
        writeCount++;                 
      }
      // We expect the filesystem to be HEALTHY; the open file is not flagged without -openforwrite
      outStr = runFsck(conf, 0, true, topDir);
      System.out.println(outStr);
      assertTrue(outStr.contains(NamenodeFsck.HEALTHY_STATUS));
      assertFalse(outStr.contains("OPENFORWRITE"));
      // Use -openforwrite option to list open files
      outStr = runFsck(conf, 0, true, topDir, "-openforwrite");
      System.out.println(outStr);
      assertTrue(outStr.contains("OPENFORWRITE"));
      assertTrue(outStr.contains("openFile"));
      // Close the file
      out.close();
      // Now, fsck should show HEALTHY fs and should not show any open files
      outStr = runFsck(conf, 0, true, topDir);
      System.out.println(outStr);
      assertTrue(outStr.contains(NamenodeFsck.HEALTHY_STATUS));
      assertFalse(outStr.contains("OPENFORWRITE"));
      util.cleanup(fs, topDir);
    } finally {
      if (fs != null) {try{fs.close();} catch(Exception e){}}
      if (cluster != null) { cluster.shutdown(); }
    }
  }

Examples of org.apache.hadoop.hdfs.DFSTestUtil

    MiniDFSCluster cluster = null;
    try {
      cluster = new MiniDFSCluster.Builder(conf).build();
      cluster.waitActive();
      fs = cluster.getFileSystem();
      DFSTestUtil util = new DFSTestUtil.Builder().
          setName("testGetCorruptFiles").setNumFiles(3).setMaxLevels(1).
          setMaxSize(1024).build();
      util.createFiles(fs, "/corruptData", (short) 1);
      util.waitReplication(fs, "/corruptData", (short) 1);

      // String outStr = runFsck(conf, 0, true, "/corruptData", "-list-corruptfileblocks");
      String outStr = runFsck(conf, 0, false, "/corruptData", "-list-corruptfileblocks");
      System.out.println("1. good fsck out: " + outStr);
      assertTrue(outStr.contains("has 0 CORRUPT files"));
      // delete the blocks
      final String bpid = cluster.getNamesystem().getBlockPoolId();
      for (int i=0; i<4; i++) {
        for (int j=0; j<=1; j++) {
          File storageDir = cluster.getInstanceStorageDir(i, j);
          File data_dir = MiniDFSCluster.getFinalizedDir(storageDir, bpid);
          File[] blocks = data_dir.listFiles();
          if (blocks == null)
            continue;
 
          for (int idx = 0; idx < blocks.length; idx++) {
            if (!blocks[idx].getName().startsWith("blk_")) {
              continue;
            }
            assertTrue("Cannot remove file.", blocks[idx].delete());
          }
        }
      }

      // wait for the namenode to see the corruption
      final NamenodeProtocols namenode = cluster.getNameNodeRpc();
      CorruptFileBlocks corruptFileBlocks = namenode
          .listCorruptFileBlocks("/corruptData", null);
      int numCorrupt = corruptFileBlocks.getFiles().length;
      while (numCorrupt == 0) {
        Thread.sleep(1000);
        corruptFileBlocks = namenode
            .listCorruptFileBlocks("/corruptData", null);
        numCorrupt = corruptFileBlocks.getFiles().length;
      }
      outStr = runFsck(conf, -1, true, "/corruptData", "-list-corruptfileblocks");
      System.out.println("2. bad fsck out: " + outStr);
      assertTrue(outStr.contains("has 3 CORRUPT files"));

      // Do a listing on a dir which doesn't have any corrupt blocks and validate
      util.createFiles(fs, "/goodData");
      outStr = runFsck(conf, 0, true, "/goodData", "-list-corruptfileblocks");
      System.out.println("3. good fsck out: " + outStr);
      assertTrue(outStr.contains("has 0 CORRUPT files"));
      util.cleanup(fs,"/corruptData");
      util.cleanup(fs, "/goodData");
    } finally {
      if (cluster != null) {cluster.shutdown();}
    }
  }

Examples of org.apache.hadoop.hdfs.DFSTestUtil

    cluster.waitActive();
    FileSystem fs = cluster.getFileSystem();
    try {
      // test finalized replicas
      final String TopDir = "/test";
      DFSTestUtil util = new DFSTestUtil.Builder().
          setName("TestDatanodeRestart").setNumFiles(2).build();
      util.createFiles(fs, TopDir, (short)3);
      util.waitReplication(fs, TopDir, (short)3);
      util.checkFiles(fs, TopDir);
      cluster.restartDataNodes();
      cluster.waitActive();
      util.checkFiles(fs, TopDir);
    } finally {
      cluster.shutdown();
    }
  }

Examples of org.apache.hadoop.hdfs.DFSTestUtil

      conf.setInt(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY, 3 * 1000); // datanode sends block reports
      cluster = new MiniDFSCluster.Builder(conf).build();
      FileSystem fs = cluster.getFileSystem();

      // create two files with one block each
      DFSTestUtil util = new DFSTestUtil.Builder().
          setName("testCorruptFilesCorruptedBlock").setNumFiles(2).
          setMaxLevels(1).setMaxSize(512).build();
      util.createFiles(fs, "/srcdat10");

      // fetch bad file list from namenode. There should be none.
      final NameNode namenode = cluster.getNameNode();
      Collection<FSNamesystem.CorruptFileBlockInfo> badFiles = namenode.
        getNamesystem().listCorruptFileBlocks("/", null);
      assertTrue("Namenode has " + badFiles.size()
          + " corrupt files. Expecting None.", badFiles.size() == 0);

      // Now deliberately corrupt one block
      String bpid = cluster.getNamesystem().getBlockPoolId();
      File storageDir = cluster.getInstanceStorageDir(0, 1);
      File data_dir = MiniDFSCluster.getFinalizedDir(storageDir, bpid);
      assertTrue("data directory does not exist", data_dir.exists());
      File[] blocks = data_dir.listFiles();
      assertTrue("Blocks do not exist in data-dir", (blocks != null) && (blocks.length > 0));
      for (int idx = 0; idx < blocks.length; idx++) {
        if (blocks[idx].getName().startsWith("blk_") &&
            blocks[idx].getName().endsWith(".meta")) {
          //
          // shorten .meta file
          //
          RandomAccessFile file = new RandomAccessFile(blocks[idx], "rw");
          FileChannel channel = file.getChannel();
          long position = channel.size() - 2;
          int length = 2;
          byte[] buffer = new byte[length];
          random.nextBytes(buffer);
          channel.write(ByteBuffer.wrap(buffer), position);
          file.close();
          LOG.info("Deliberately corrupting file " + blocks[idx].getName() +
              " at offset " + position + " length " + length);

          // read all files to trigger detection of corrupted replica
          try {
            util.checkFiles(fs, "/srcdat10");
          } catch (BlockMissingException e) {
            System.out.println("Received BlockMissingException as expected.");
          } catch (IOException e) {
            assertTrue("Corrupted replicas not handled properly. Expecting BlockMissingException " +
                " but received IOException " + e, false);
          }
          break;
        }
      }

      // fetch bad file list from namenode. There should be one file.
      badFiles = namenode.getNamesystem().listCorruptFileBlocks("/", null);
      LOG.info("Namenode has bad files. " + badFiles.size());
      assertTrue("Namenode has " + badFiles.size() + " bad files. Expecting 1.",
          badFiles.size() == 1);
      util.cleanup(fs, "/srcdat10");
    } finally {
      if (cluster != null) { cluster.shutdown(); }
    }
  }

Examples of org.apache.hadoop.hdfs.DFSTestUtil

      cluster.getNameNodeRpc().
        setSafeMode(HdfsConstants.SafeModeAction.SAFEMODE_LEAVE);
      FileSystem fs = cluster.getFileSystem();

      // create two files with one block each
      DFSTestUtil util = new DFSTestUtil.Builder().
          setName("testListCorruptFileBlocksInSafeMode").setNumFiles(2).
          setMaxLevels(1).setMaxSize(512).build();
      util.createFiles(fs, "/srcdat10");

      // fetch bad file list from namenode. There should be none.
      Collection<FSNamesystem.CorruptFileBlockInfo> badFiles =
        cluster.getNameNode().getNamesystem().listCorruptFileBlocks("/", null);
      assertTrue("Namenode has " + badFiles.size()
          + " corrupt files. Expecting None.", badFiles.size() == 0);

      // Now deliberately corrupt one block
      File storageDir = cluster.getInstanceStorageDir(0, 0);
      File data_dir = MiniDFSCluster.getFinalizedDir(storageDir,
          cluster.getNamesystem().getBlockPoolId());
      assertTrue("data directory does not exist", data_dir.exists());
      File[] blocks = data_dir.listFiles();
      assertTrue("Blocks do not exist in data-dir", (blocks != null) &&
                 (blocks.length > 0));
      for (int idx = 0; idx < blocks.length; idx++) {
        if (blocks[idx].getName().startsWith("blk_") &&
            blocks[idx].getName().endsWith(".meta")) {
          //
          // shorten .meta file
          //
          RandomAccessFile file = new RandomAccessFile(blocks[idx], "rw");
          FileChannel channel = file.getChannel();
          long position = channel.size() - 2;
          int length = 2;
          byte[] buffer = new byte[length];
          random.nextBytes(buffer);
          channel.write(ByteBuffer.wrap(buffer), position);
          file.close();
          LOG.info("Deliberately corrupting file " + blocks[idx].getName() +
              " at offset " + position + " length " + length);

          // read all files to trigger detection of corrupted replica
          try {
            util.checkFiles(fs, "/srcdat10");
          } catch (BlockMissingException e) {
            System.out.println("Received BlockMissingException as expected.");
          } catch (IOException e) {
            assertTrue("Corrupted replicas not handled properly. " +
                       "Expecting BlockMissingException " +
                       " but received IOException " + e, false);
          }
          break;
        }
      }

      // fetch bad file list from namenode. There should be one file.
      badFiles = cluster.getNameNode().getNamesystem().
        listCorruptFileBlocks("/", null);
      LOG.info("Namenode has bad files. " + badFiles.size());
      assertTrue("Namenode has " + badFiles.size() + " bad files. Expecting 1.",
          badFiles.size() == 1);
      // restart namenode
      cluster.restartNameNode(0);
      fs = cluster.getFileSystem();

      // wait until replication queues have been initialized
      while (!cluster.getNameNode().namesystem.isPopulatingReplQueues()) {
        try {
          LOG.info("waiting for replication queues");
          Thread.sleep(1000);
        } catch (InterruptedException ignore) {
        }
      }

      // read all files to trigger detection of corrupted replica
      try {
        util.checkFiles(fs, "/srcdat10");
      } catch (BlockMissingException e) {
        System.out.println("Received BlockMissingException as expected.");
      } catch (IOException e) {
        assertTrue("Corrupted replicas not handled properly. " +
                   "Expecting BlockMissingException " +
                   " but received IOException " + e, false);
      }

      // fetch bad file list from namenode. There should be one file.
      badFiles = cluster.getNameNode().getNamesystem().
        listCorruptFileBlocks("/", null);
      LOG.info("Namenode has bad files. " + badFiles.size());
      assertTrue("Namenode has " + badFiles.size() + " bad files. Expecting 1.",
          badFiles.size() == 1);

      // check that we are still in safe mode
      assertTrue("Namenode is not in safe mode",
                 cluster.getNameNode().isInSafeMode());

      // now leave safe mode so that we can clean up
      cluster.getNameNodeRpc().
        setSafeMode(HdfsConstants.SafeModeAction.SAFEMODE_LEAVE);

      util.cleanup(fs, "/srcdat10");
    } catch (Exception e) {
      LOG.error(StringUtils.stringifyException(e));
      throw e;
    } finally {
      if (cluster != null) {
        cluster.shutdown();
      }
    }
  }

Examples of org.apache.hadoop.hdfs.DFSTestUtil

    MiniDFSCluster cluster = null;
    try {
      cluster = new MiniDFSCluster.Builder(conf).build();
      cluster.waitActive();
      fs = cluster.getFileSystem();
      DFSTestUtil util = new DFSTestUtil.Builder().
          setName("testGetCorruptFiles").setNumFiles(3).setMaxLevels(1).
          setMaxSize(1024).build();
      util.createFiles(fs, "/corruptData");

      final NameNode namenode = cluster.getNameNode();
      Collection<FSNamesystem.CorruptFileBlockInfo> corruptFileBlocks =
        namenode.getNamesystem().listCorruptFileBlocks("/corruptData", null);
      int numCorrupt = corruptFileBlocks.size();
      assertTrue(numCorrupt == 0);
      // delete the blocks
      String bpid = cluster.getNamesystem().getBlockPoolId();
      for (int i = 0; i < 4; i++) {
        for (int j = 0; j <= 1; j++) {
          File storageDir = cluster.getInstanceStorageDir(i, j);
          File data_dir = MiniDFSCluster.getFinalizedDir(storageDir, bpid);
          File[] blocks = data_dir.listFiles();
          if (blocks == null)
            continue;
          // assertTrue("Blocks do not exist in data-dir", (blocks != null) &&
          // (blocks.length > 0));
          for (int idx = 0; idx < blocks.length; idx++) {
            if (!blocks[idx].getName().startsWith("blk_")) {
              continue;
            }
            LOG.info("Deliberately removing file " + blocks[idx].getName());
            assertTrue("Cannot remove file.", blocks[idx].delete());
            // break;
          }
        }
      }

      int count = 0;
      corruptFileBlocks = namenode.getNamesystem().
        listCorruptFileBlocks("/corruptData", null);
      numCorrupt = corruptFileBlocks.size();
      while (numCorrupt < 3) {
        Thread.sleep(1000);
        corruptFileBlocks = namenode.getNamesystem()
            .listCorruptFileBlocks("/corruptData", null);
        numCorrupt = corruptFileBlocks.size();
        count++;
        if (count > 30)
          break;
      }
      // Validate we get all the corrupt files
      LOG.info("Namenode has bad files. " + numCorrupt);
      assertTrue(numCorrupt == 3);
      // test the paging here

      FSNamesystem.CorruptFileBlockInfo[] cfb = corruptFileBlocks
          .toArray(new FSNamesystem.CorruptFileBlockInfo[0]);
      // now get the 2nd and 3rd files that are corrupt
      String[] cookie = new String[]{"1"};
      Collection<FSNamesystem.CorruptFileBlockInfo> nextCorruptFileBlocks =
        namenode.getNamesystem()
          .listCorruptFileBlocks("/corruptData", cookie);
      FSNamesystem.CorruptFileBlockInfo[] ncfb = nextCorruptFileBlocks
          .toArray(new FSNamesystem.CorruptFileBlockInfo[0]);
      numCorrupt = nextCorruptFileBlocks.size();
      assertTrue(numCorrupt == 2);
      assertTrue(ncfb[0].block.getBlockName()
          .equalsIgnoreCase(cfb[1].block.getBlockName()));

      corruptFileBlocks =
        namenode.getNamesystem()
          .listCorruptFileBlocks("/corruptData", cookie);
      numCorrupt = corruptFileBlocks.size();
      assertTrue(numCorrupt == 0);
      // Do a listing on a dir which doesn't have any corrupt blocks and
      // validate
      util.createFiles(fs, "/goodData");
      corruptFileBlocks =
        namenode.getNamesystem().listCorruptFileBlocks("/goodData", null);
      numCorrupt = corruptFileBlocks.size();
      assertTrue(numCorrupt == 0);
      util.cleanup(fs, "/corruptData");
      util.cleanup(fs, "/goodData");
    } finally {
      if (cluster != null) {
        cluster.shutdown();
      }
    }
  }

Examples of org.apache.hadoop.hdfs.DFSTestUtil

    try {
      cluster = new MiniDFSCluster.Builder(conf).build();
      cluster.waitActive();
      fs = cluster.getFileSystem();
      DistributedFileSystem dfs = (DistributedFileSystem) fs;
      DFSTestUtil util = new DFSTestUtil.Builder().
          setName("testGetCorruptFiles").setNumFiles(3).
          setMaxLevels(1).setMaxSize(1024).build();
      util.createFiles(fs, "/corruptData");

      RemoteIterator<Path> corruptFileBlocks =
        dfs.listCorruptFileBlocks(new Path("/corruptData"));
      int numCorrupt = countPaths(corruptFileBlocks);
      assertTrue(numCorrupt == 0);
      // delete the blocks
      String bpid = cluster.getNamesystem().getBlockPoolId();
      // Loop over the number of data directories per datanode (2)
      for (int i = 0; i < 2; i++) {
        File storageDir = cluster.getInstanceStorageDir(0, i);
        File data_dir = MiniDFSCluster.getFinalizedDir(storageDir, bpid);
        File[] blocks = data_dir.listFiles();
        if (blocks == null)
          continue;
        // assertTrue("Blocks do not exist in data-dir", (blocks != null) &&
        // (blocks.length > 0));
        for (int idx = 0; idx < blocks.length; idx++) {
          if (!blocks[idx].getName().startsWith("blk_")) {
            continue;
          }
          LOG.info("Deliberately removing file " + blocks[idx].getName());
          assertTrue("Cannot remove file.", blocks[idx].delete());
          // break;
        }
      }

      int count = 0;
      corruptFileBlocks = dfs.listCorruptFileBlocks(new Path("/corruptData"));
      numCorrupt = countPaths(corruptFileBlocks);
      while (numCorrupt < 3) {
        Thread.sleep(1000);
        corruptFileBlocks = dfs.listCorruptFileBlocks(new Path("/corruptData"));
        numCorrupt = countPaths(corruptFileBlocks);
        count++;
        if (count > 30)
          break;
      }
      // Validate we get all the corrupt files
      LOG.info("Namenode has bad files. " + numCorrupt);
      assertTrue(numCorrupt == 3);

      util.cleanup(fs, "/corruptData");
      util.cleanup(fs, "/goodData");
    } finally {
      if (cluster != null) {
        cluster.shutdown();
      }
    }
  }
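
The DistributedFileSystem examples above and below count corrupt paths with a countPaths helper that the page also does not show. A minimal sketch, under the assumption that it simply drains the iterator and returns the number of entries seen:

  // Assumed helper: drain a RemoteIterator<Path> and count its entries.
  private static int countPaths(RemoteIterator<Path> iter) throws IOException {
    int count = 0;
    while (iter.hasNext()) {
      iter.next();
      count++;
    }
    return count;
  }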

Examples of org.apache.hadoop.hdfs.DFSTestUtil

      FileSystem fs = cluster.getFileSystem();
      final int maxCorruptFileBlocks =
        FSNamesystem.DEFAULT_MAX_CORRUPT_FILEBLOCKS_RETURNED;

      // create 3 * maxCorruptFileBlocks files with one block each
      DFSTestUtil util = new DFSTestUtil.Builder().setName("testMaxCorruptFiles").
          setNumFiles(maxCorruptFileBlocks * 3).setMaxLevels(1).setMaxSize(512).
          build();
      util.createFiles(fs, "/srcdat2", (short) 1);
      util.waitReplication(fs, "/srcdat2", (short) 1);

      // verify that there are no bad blocks.
      final NameNode namenode = cluster.getNameNode();
      Collection<FSNamesystem.CorruptFileBlockInfo> badFiles = namenode.
        getNamesystem().listCorruptFileBlocks("/srcdat2", null);
      assertTrue("Namenode has " + badFiles.size() + " corrupt files. Expecting none.",
          badFiles.size() == 0);

      // Now deliberately remove blocks from all files
      final String bpid = cluster.getNamesystem().getBlockPoolId();
      for (int i=0; i<4; i++) {
        for (int j=0; j<=1; j++) {
          File storageDir = cluster.getInstanceStorageDir(i, j);
          File data_dir = MiniDFSCluster.getFinalizedDir(storageDir, bpid);
          LOG.info("Removing files from " + data_dir);
          File[] blocks = data_dir.listFiles();
          if (blocks == null)
            continue;
 
          for (int idx = 0; idx < blocks.length; idx++) {
            if (!blocks[idx].getName().startsWith("blk_")) {
              continue;
            }
            assertTrue("Cannot remove file.", blocks[idx].delete());
          }
        }
      }

      badFiles =
        namenode.getNamesystem().listCorruptFileBlocks("/srcdat2", null);

      while (badFiles.size() < maxCorruptFileBlocks) {
        LOG.info("# of corrupt files is: " + badFiles.size());
        Thread.sleep(10000);
        badFiles = namenode.getNamesystem().
          listCorruptFileBlocks("/srcdat2", null);
      }
      badFiles = namenode.getNamesystem().
        listCorruptFileBlocks("/srcdat2", null);
      LOG.info("Namenode has bad files. " + badFiles.size());
      assertTrue("Namenode has " + badFiles.size() + " bad files. Expecting " +
          maxCorruptFileBlocks + ".",
          badFiles.size() == maxCorruptFileBlocks);

      CorruptFileBlockIterator iter = (CorruptFileBlockIterator)
        fs.listCorruptFileBlocks(new Path("/srcdat2"));
      int corruptPaths = countPaths(iter);
      assertTrue("Expected more than " + maxCorruptFileBlocks +
                 " corrupt file blocks but got " + corruptPaths,
                 corruptPaths > maxCorruptFileBlocks);
      assertTrue("Iterator should have made more than 1 call but made " +
                 iter.getCallsMade(),
                 iter.getCallsMade() > 1);

      util.cleanup(fs, "/srcdat2");
    } finally {
      if (cluster != null) { cluster.shutdown(); }
    }
  }

Examples of org.apache.hadoop.hdfs.DFSTestUtil

    cluster.waitActive();
    FileSystem fs = cluster.getFileSystem();
    try {
      // test finalized replicas
      final String TopDir = "/test";
      DFSTestUtil util = new DFSTestUtil("TestCrcCorruption", 2, 3, 8*1024);
      util.createFiles(fs, TopDir, (short)3);
      util.waitReplication(fs, TopDir, (short)3);
      util.checkFiles(fs, TopDir);
      cluster.restartDataNodes();
      cluster.waitActive();
      util.checkFiles(fs, TopDir);
    } finally {
      cluster.shutdown();
    }
  }
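
This final example uses the legacy four-argument DFSTestUtil constructor instead of the Builder seen elsewhere on this page. Assuming its arguments are the test name, number of files, maximum directory depth, and maximum file size, in that order (matching the Builder setters used above), the equivalent Builder form would be:

  DFSTestUtil util = new DFSTestUtil.Builder()
      .setName("TestCrcCorruption")
      .setNumFiles(2)
      .setMaxLevels(3)
      .setMaxSize(8 * 1024)
      .build();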