Class org.apache.hadoop.raid.DistBlockIntegrityMonitor

Examples of org.apache.hadoop.raid.DistBlockIntegrityMonitor.Worker
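DistBlockIntegrityMonitor is the RaidNode's distributed block integrity monitor: rather than repairing blocks locally, it submits reconstruction jobs and tracks them. Its nested Worker class is the piece that scans for affected files and manages those jobs; in the excerpts below the Worker in use is the decommissioning monitor returned by getDecommissioningMonitor(), which finds blocks whose replicas live on decommissioning datanodes and exposes progress through checkJobs(), updateStatus(), and getLostFiles(). The excerpts are taken from the RAID test suite; helpers such as setup(), createRandomFiles(), decommissionOneNode(), printFileLocations(), writeExcludesFileAndRefresh(), and the TestBlockCopier hooks are defined in the full test sources.

A minimal sketch of the lifecycle the examples exercise, assembled only from calls that appear in the excerpts; the direct access to raidnode.blockIntegrityMonitor mirrors the tests, and the 30-second timeout and 1-second poll interval are illustrative values, not defaults of the class:

    // Minimal sketch, assuming a running RaidNode whose blockIntegrityMonitor
    // field holds a DistBlockIntegrityMonitor, as the tests below do. The
    // timeout and poll interval are illustrative, not class defaults.
    static void reconstructDecommissioningBlocks(RaidNode raidnode)
        throws Exception {
      DistBlockIntegrityMonitor monitor =
          (DistBlockIntegrityMonitor) raidnode.blockIntegrityMonitor;
      DistBlockIntegrityMonitor.Worker worker =
          monitor.getDecommissioningMonitor();

      // Find blocks whose replicas sit on decommissioning datanodes and
      // submit distributed reconstruction jobs for them.
      worker.checkAndReconstructBlocks();

      // Poll until the submitted jobs drain or the timeout elapses.
      long start = System.currentTimeMillis();
      while (monitor.jobsRunning() > 0
          && (System.currentTimeMillis() - start) < 30000L) {
        Thread.sleep(1000);
        worker.checkJobs();   // harvest finished and failed jobs
      }
      worker.updateStatus();  // refresh the counters that the assertions read
    }

The first and third excerpts below exercise the decommissioning data source against a handful of small files (the third differs only in passing the FileSystem to getLostFiles()); the second drives block regeneration for a file raided with the "rs" codec.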


    String[] fileNames = {"file0", "file1", "file2", "file3", "file4"};
    long[][] crcs = new long[fileNames.length][];
    FileStatus[] files = new FileStatus[fileNames.length];
   
    createRandomFiles(filePath, fileNames, 2, blocksPerFile, crcs, files);
    Worker bc =
      ((DistBlockIntegrityMonitor) raidnode.blockIntegrityMonitor).getDecommissioningMonitor();
   
    for (FileStatus file : files) {
      printFileLocations(file);
    }
   
   
    Set<String> downNodes = new HashSet<String>();
    for (int i = 0; i < numDatanodes; i++) {
      // Decommission a node and test the data source.
      String downNode = decommissionOneNode();
      downNodes.add(downNode);

      // Compute which files have decommissioning blocks and how many
      HashMap<String, Integer> decomFiles = new HashMap<String, Integer>();
      for (FileStatus file : files) {

        String path = file.getPath().toUri().getPath();
        int decommissioningBlocks = 0;
        BlockLocation[] locations =
          fileSys.getFileBlockLocations(file, 0, file.getLen());

        for (BlockLocation loc : locations) {
          String[] names = loc.getNames();
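          // A block counts as decommissioning only when all of its replicas
          // (the files are written with replication 2) are on down nodes.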
          if (downNodes.contains(names[0]) && downNodes.contains(names[1])) {
            decommissioningBlocks++;
          }
        }
        if (decommissioningBlocks > 0) {
          decomFiles.put(path, decommissioningBlocks);
        }
      }
     
      // Verify results
      // FIXME: re-enable test when the underlying issue in fsck/namesystem is resolved
      //assertEquals(decomFiles.keySet(), bc.getDecommissioningFiles().keySet());
    }
   
    // Un-decommission those nodes and test the data source again.
    writeExcludesFileAndRefresh(null);
    assertEquals(0, bc.getLostFiles().size());
   
    // Done.
    teardown();
  }


    final int numBlocks = STRIPE_LENGTH + 1;
    final int repl = 1;
    setup(10, -1);
   
    DistBlockIntegrityMonitor br = new DistBlockRegeneratorFake(conf);
    Worker bc = br.getDecommissioningMonitor();
   
    // Generate file
    Path raidPath = new Path("/raidrs");
   
    Path filePath = new Path("/user/hadoop/testReconstruction/file");
    long[] crcs = createRandomFile(filePath, repl, numBlocks);
    FileStatus file = fileSys.getFileStatus(filePath);
    RaidNode.doRaid(conf, file, raidPath, Codec.getCodec("rs"),
        new RaidNode.Statistics(), RaidUtils.NULL_PROGRESSABLE,
        false, repl, repl);
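    // Generate "rs" parity for the file under raidPath before exercising the
    // block regenerator.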
   
    // Do some testing
    printFileLocations(file);

    // Mark the file as "decommissioning" via the TestBlockCopier test hook.
    TestBlockCopier.decommissioningFiles =
      new String[] { filePath.toUri().toString() };

    // "Decommission" each of the file's blocks in turn
    List<LocatedBlock> fileBlocks =
        dfs.getNameNode().getBlockLocations(filePath.toUri().toString(),
                                            0L,
                                            file.getLen()).getLocatedBlocks();
   

    for (LocatedBlock b : fileBlocks) {
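      // Mark this block as "decommissioning" via the test hook, then let the
      // Worker detect it and submit a distributed regeneration job.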
      TestBlockCopier.decommissioningBlocks = new LocatedBlock[] { b };
     
      bc.checkAndReconstructBlocks();
     
      long start = System.currentTimeMillis();
      while ((br.jobsRunning() > 0)
          && ((System.currentTimeMillis() - start) < 30000)) {
        LOG.info("Waiting on block regen jobs to complete ("
            + br.jobsRunning() + " running).");
        Thread.sleep(1000);
        bc.checkJobs();
      }
    }
   
    // Verify that each block now has an extra replica.
    printFileLocations(file);
   
    fileBlocks =
      dfs.getNameNode().getBlockLocations(filePath.toUri().toString(),
          0L,
          file.getLen()).getLocatedBlocks();
    for (LocatedBlock b : fileBlocks) {
      assertEquals("block was improperly replicated",
          repl+1, b.getLocations().length);
    }
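    // Refresh the monitor's status and counters before the final assertions.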
    bc.updateStatus();
    assertEquals("unexpected copy failures occurred",
        0, br.getNumFileCopyFailures());
    assertEquals("unexpected number of file copy operations",
        numBlocks, br.getNumFilesCopied());
   

    String[] fileNames = {"file0", "file1", "file2", "file3", "file4"};
    long[][] crcs = new long[fileNames.length][];
    FileStatus[] files = new FileStatus[fileNames.length];
   
    createRandomFiles(filePath, fileNames, 2, blocksPerFile, crcs, files);
    Worker bc =
      ((DistBlockIntegrityMonitor) raidnode.blockIntegrityMonitor).getDecommissioningMonitor();
   
    for (FileStatus file : files) {
      printFileLocations(file);
    }
   
   
    Set<String> downNodes = new HashSet<String>();
    for (int i = 0; i < numDatanodes; i++) {
      // Decommission a node and test the data source.
      String downNode = decommissionOneNode();
      downNodes.add(downNode);

      // Compute which files have decommissioning blocks and how many
      HashMap<String, Integer> decomFiles = new HashMap<String, Integer>();
      for (FileStatus file : files) {

        String path = file.getPath().toUri().getPath();
        int decommissioningBlocks = 0;
        BlockLocation[] locations =
          fileSys.getFileBlockLocations(file, 0, file.getLen());

        for (BlockLocation loc : locations) {
          String[] names = loc.getNames();
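          // A block counts as decommissioning only when all of its replicas
          // (the files are written with replication 2) are on down nodes.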
          if (downNodes.contains(names[0]) && downNodes.contains(names[1])) {
            decommissioningBlocks++;
          }
        }
        if (decommissioningBlocks > 0) {
          decomFiles.put(path, decommissioningBlocks);
        }
      }
     
      // Verify results
      // FIXME: re-enable test when the underlying issue in fsck/namesystem is resolved
      //assertEquals(decomFiles.keySet(), bc.getDecommissioningFiles().keySet());
    }
   
    // Un-decommission those nodes and test the data source again.
    writeExcludesFileAndRefresh(null);
    assertEquals(0, bc.getLostFiles(fileSys).size());
   
    // Done.
    teardown();
  }
