Package org.apache.hadoop.hdfs.protocol

Examples of org.apache.hadoop.hdfs.protocol.LocatedBlocks
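A LocatedBlocks is the NameNode's answer to a getBlockLocations call: an ordered list of LocatedBlock entries, each carrying the block itself, the datanode locations of its replicas, and a corruption flag. As a minimal sketch of fetching and walking one (hedged: "cluster" and "conf" stand in for the MiniDFSCluster and Configuration that the snippets below set up elsewhere, and "/some/file" is a placeholder path):

import java.net.InetSocketAddress;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSClient;
import org.apache.hadoop.hdfs.protocol.LocatedBlock;
import org.apache.hadoop.hdfs.protocol.LocatedBlocks;

// Ask the NameNode for the full block map of a file, then walk it.
DFSClient dfsClient = new DFSClient(
    new InetSocketAddress("localhost", cluster.getNameNodePort()), conf);
LocatedBlocks blocks = dfsClient.getNamenode()
    .getBlockLocations("/some/file", 0, Long.MAX_VALUE);
for (LocatedBlock lBlk : blocks.getLocatedBlocks()) {
  System.out.println(lBlk.getBlock()
      + " replicas=" + lBlk.getLocations().length
      + " corrupt=" + lBlk.isCorrupt());
}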


    waitForFileRaided(LOG, fileSys, srcFile, destPath);

    // Corrupt each of the listed blocks of the file
    for (int blockNumToCorrupt : listBlockNumToCorrupt) {
      LOG.info("Corrupt block " + blockNumToCorrupt + " of file " + srcFile);
      LocatedBlocks locations = getBlockLocations(srcFile);
      corruptBlock(srcFile, locations.get(blockNumToCorrupt).getBlock(),
            NUM_DATANODES, true);
    }

    // Validate
    DistributedRaidFileSystem raidfs = getRaidFS();
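The validation step reads the file back through the RAID-aware filesystem and compares it against a checksum recorded before the corruption. TestRaidDfs.validateFile (called in later snippets) is the real helper; the following is only an illustrative stand-in, built on the same FileSystem API:

import java.io.IOException;
import java.util.zip.CRC32;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

// Illustrative stand-in for TestRaidDfs.validateFile: re-read the file and
// check both its length and its CRC against the values recorded earlier.
public static boolean validateFile(FileSystem fs, Path file, long length,
    long expectedCrc) throws IOException {
  if (fs.getFileStatus(file).getLen() != length) {
    return false;
  }
  CRC32 crc = new CRC32();
  FSDataInputStream in = fs.open(file);
  try {
    byte[] buf = new byte[4096];
    int n;
    while ((n = in.read(buf)) > 0) {
      crc.update(buf, 0, n);
    }
  } finally {
    in.close();
  }
  return crc.getValue() == expectedCrc;
}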


      cnode.stop();
      cnode.join();
      cnode = null;

      FileStatus srcStat = fileSys.getFileStatus(file1);
      LocatedBlocks locations = RaidDFSUtil.getBlockLocations(
        (DistributedFileSystem) fileSys, file1.toUri().getPath(),
        0, srcStat.getLen());

      DistributedFileSystem dfs = (DistributedFileSystem)fileSys;

      // Corrupt blocks in different stripes. We can fix them.
      int[] corruptBlockIdxs = new int[]{0, 4, 6};
      for (int idx: corruptBlockIdxs) {
        LOG.info("Corrupting block " + locations.get(idx).getBlock());
        corruptBlock(locations.get(idx).getBlock().getBlockName());
      }
      TestBlockFixer.reportCorruptBlocks(fileSys, file1, corruptBlockIdxs,
        srcStat.getBlockSize());

      String fileUriPath = file1.toUri().getPath();
      waitForCorruptBlocks(corruptBlockIdxs.length, dfs, file1);

      // Create RaidShell and fix the file.
      RaidShell shell = new RaidShell(conf);
      String[] args = new String[2];
      args[0] = "-recoverBlocks";
      args[1] = fileUriPath;
      ToolRunner.run(shell, args);

      waitForCorruptBlocks(0, dfs, file1);

      assertTrue(TestRaidDfs.validateFile(dfs, file1, file1Len, crc1));

      // Now corrupt and fix the parity file.
      FileStatus parityStat = fileSys.getFileStatus(parityFile);
      long parityCrc = getCRC(fileSys, parityFile);
      locations = RaidDFSUtil.getBlockLocations(
        dfs, parityFile.toUri().getPath(), 0, parityStat.getLen());
      corruptBlock(locations.get(0).getBlock().getBlockName());
      TestBlockFixer.reportCorruptBlocks(fileSys, parityFile, new int[]{0},
        parityStat.getBlockSize());
      waitForCorruptBlocks(1, dfs, parityFile);

      args[1] = parityFile.toUri().getPath();
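waitForCorruptBlocks, called several times above, is a test utility whose implementation is not shown. A plausible sketch, assuming it polls the NameNode's view of the file until the expected number of blocks is flagged corrupt (RaidDFSUtil.getBlockLocations and fail() from JUnit are used as in the surrounding test class):

// Hypothetical sketch of waitForCorruptBlocks: poll the block locations
// until exactly numCorrupt blocks report isCorrupt(), or time out.
private static void waitForCorruptBlocks(int numCorrupt,
    DistributedFileSystem dfs, Path file)
    throws IOException, InterruptedException {
  String path = file.toUri().getPath();
  long len = dfs.getFileStatus(file).getLen();
  long start = System.currentTimeMillis();
  int found = -1;
  while (System.currentTimeMillis() - start < 120000) {
    LocatedBlocks locs = RaidDFSUtil.getBlockLocations(dfs, path, 0, len);
    found = 0;
    for (LocatedBlock b : locs.getLocatedBlocks()) {
      if (b.isCorrupt()) {
        found++;
      }
    }
    if (found == numCorrupt) {
      return;
    }
    Thread.sleep(1000);
  }
  fail("Expected " + numCorrupt + " corrupt blocks in " + path
      + " but found " + found);
}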

      assertEquals(0, corruptFiles.length);

      // Now corrupt the file.
      long corruptOffset = blockSize * 5;
      FileStatus srcStat = fileSys.getFileStatus(file1);
      LocatedBlocks locations = RaidDFSUtil.getBlockLocations(dfs,
          file1.toUri().getPath(), 0, srcStat.getLen());
      corruptBlock(locations.get(5).getBlock().getBlockName());
      corruptBlock(locations.get(6).getBlock().getBlockName());
      TestBlockFixer.reportCorruptBlocks(dfs, file1, new int[]{5, 6},
          srcStat.getBlockSize());

      // Ensure file is corrupted.
      corruptFiles = RaidDFSUtil.getCorruptFiles(conf);

      TestRaidDfs.waitForFileRaided(LOG, fileSys, file1, destPath);
      cnode.stop(); cnode.join();

      FileStatus srcStat = fileSys.getFileStatus(file1);
      DistributedFileSystem dfs = (DistributedFileSystem)fileSys;
      LocatedBlocks locs = RaidDFSUtil.getBlockLocations(
        dfs, file1.toUri().getPath(), 0, srcStat.getLen());

      String[] corruptFiles = RaidDFSUtil.getCorruptFiles(conf);
      assertEquals(0, corruptFiles.length);
      assertEquals(0, cnode.blockFixer.filesFixed());

      // Corrupt blocks in two different stripes. We can fix them.
      int[] corruptBlockIdxs = new int[]{0, 4, 6};
      for (int idx: corruptBlockIdxs)
        corruptBlock(locs.get(idx).getBlock().getBlockName());
      reportCorruptBlocks(dfs, file1, corruptBlockIdxs, blockSize);

      corruptFiles = RaidDFSUtil.getCorruptFiles(conf);
      assertEquals(1, corruptFiles.length);
      assertEquals(file1.toUri().getPath(), corruptFiles[0]);
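reportCorruptBlocks shortcuts the normal corruption-detection path: rather than waiting for a client read to trip over a bad replica, it tells the NameNode directly. A hedged sketch of what such a helper can look like; the real TestBlockFixer helper may differ, but ClientProtocol.reportBadBlocks is the underlying RPC:

// Hypothetical sketch: report the blocks at the given indexes as bad via
// the ClientProtocol.reportBadBlocks RPC. The blockSize argument that the
// call sites pass is not needed in this simplified version.
static void reportCorruptBlocks(FileSystem fs, Path file, int[] idxs,
    long blockSize) throws IOException {
  DistributedFileSystem dfs = (DistributedFileSystem) fs;
  long len = dfs.getFileStatus(file).getLen();
  LocatedBlocks locs = RaidDFSUtil.getBlockLocations(
      dfs, file.toUri().getPath(), 0, len);
  LocatedBlock[] bad = new LocatedBlock[idxs.length];
  for (int i = 0; i < idxs.length; i++) {
    bad[i] = locs.get(idxs[i]);
  }
  dfs.getClient().getNamenode().reportBadBlocks(bad);
}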

      TestRaidDfs.waitForFileRaided(LOG, fileSys, file1, destPath);
      cnode.stop(); cnode.join();

      FileStatus srcStat = fileSys.getFileStatus(file1);
      DistributedFileSystem dfs = (DistributedFileSystem)fileSys;
      LocatedBlocks locs = RaidDFSUtil.getBlockLocations(
        dfs, file1.toUri().getPath(), 0, srcStat.getLen());

      String[] corruptFiles = RaidDFSUtil.getCorruptFiles(conf);
      assertEquals(0, corruptFiles.length);
      assertEquals(0, cnode.blockFixer.filesFixed());

      corruptBlock(locs.get(0).getBlock().getBlockName());
      reportCorruptBlocks(dfs, file1, new int[]{0}, blockSize);

      corruptFiles = RaidDFSUtil.getCorruptFiles(conf);
      assertEquals(1, corruptFiles.length);
      assertEquals(file1.toUri().getPath(), corruptFiles[0]);

      cnode = RaidNode.createRaidNode(null, localConf);
      long start = System.currentTimeMillis();
      while (cnode.blockFixer.filesFixed() < 1 &&
             System.currentTimeMillis() - start < 120000) {
        LOG.info("Test testGeneratedBlock waiting for files to be fixed.");
        Thread.sleep(1000);
      }
      assertEquals(1, cnode.blockFixer.filesFixed());

      // Stop RaidNode
      cnode.stop(); cnode.join(); cnode = null;

      // The block should now have been reconstructed; verify the contents.
      dfs = getDFS(conf, dfs);
      assertTrue(TestRaidDfs.validateFile(dfs, file1, file1Len, crc1));

      // Now corrupt the generated block.
      locs = RaidDFSUtil.getBlockLocations(
        dfs, file1.toUri().getPath(), 0, srcStat.getLen());
      corruptBlock(locs.get(0).getBlock().getBlockName());
      reportCorruptBlocks(dfs, file1, new int[]{0}, blockSize);

      try {
        Thread.sleep(5*1000);
      } catch (InterruptedException ignore) {
        // Ignored; the sleep only paces the test.
      }

      long parityCRC = getCRC(fileSys, parityFile);

      FileStatus parityStat = fileSys.getFileStatus(parityFile);
      DistributedFileSystem dfs = (DistributedFileSystem)fileSys;
      LocatedBlocks locs = RaidDFSUtil.getBlockLocations(
        dfs, parityFile.toUri().getPath(), 0, parityStat.getLen());

      String[] corruptFiles = RaidDFSUtil.getCorruptFiles(conf);
      assertEquals(0, corruptFiles.length);
      assertEquals(0, cnode.blockFixer.filesFixed());

      // Corrupt parity blocks in different stripes.
      int[] corruptBlockIdxs = new int[]{0, 1, 2};
      for (int idx: corruptBlockIdxs)
        corruptBlock(locs.get(idx).getBlock().getBlockName());
      reportCorruptBlocks(dfs, parityFile, corruptBlockIdxs, blockSize);

      corruptFiles = RaidDFSUtil.getCorruptFiles(conf);
      assertEquals(1, corruptFiles.length);
      assertEquals(parityFile.toUri().getPath(), corruptFiles[0]);

      Path partFile = new Path(harDirectory, "part-0");
      long partCRC = getCRC(fileSys, partFile);
      FileStatus partStat = fileSys.getFileStatus(partFile);
      DistributedFileSystem dfs = (DistributedFileSystem)fileSys;
      LocatedBlocks locs = RaidDFSUtil.getBlockLocations(
        dfs, partFile.toUri().getPath(), 0, partStat.getLen());
      // 7 parity blocks => 4 har blocks.
      assertEquals(4, locs.getLocatedBlocks().size());
      cnode.stop(); cnode.join();

      String[] corruptFiles = RaidDFSUtil.getCorruptFiles(conf);
      assertEquals(0, corruptFiles.length);
      assertEquals(0, cnode.blockFixer.filesFixed());

      // Corrupt parity blocks in different stripes.
      int[] corruptBlockIdxs = new int[]{0, 3};
      for (int idx: corruptBlockIdxs)
        corruptBlock(locs.get(idx).getBlock().getBlockName());
      reportCorruptBlocks(dfs, partFile, corruptBlockIdxs,
        partStat.getBlockSize());

      corruptFiles = RaidDFSUtil.getCorruptFiles(conf);
      assertEquals(1, corruptFiles.length);

  public void testCorruptBlock() throws Exception {
    Configuration conf = new HdfsConfiguration();
    conf.setLong(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY, 1000);
    FileSystem fs = null;
    DFSClient dfsClient = null;
    LocatedBlocks blocks = null;
    int replicaCount = 0;
    Random random = new Random();
    String outStr = null;
    short factor = 1;

    MiniDFSCluster cluster = null;
    try {
      cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
      cluster.waitActive();
      fs = cluster.getFileSystem();
      Path file1 = new Path("/testCorruptBlock");
      DFSTestUtil.createFile(fs, file1, 1024, factor, 0);
      // Wait until file replication has completed
      DFSTestUtil.waitReplication(fs, file1, factor);
      ExtendedBlock block = DFSTestUtil.getFirstBlock(fs, file1);

      // Make sure the filesystem is in a healthy state
      outStr = runFsck(conf, 0, true, "/");
      System.out.println(outStr);
      assertTrue(outStr.contains(NamenodeFsck.HEALTHY_STATUS));

      // Corrupt the replica by overwriting part of the on-disk block file
      File blockFile = MiniDFSCluster.getBlockFile(0, block);
      if (blockFile != null && blockFile.exists()) {
        RandomAccessFile raFile = new RandomAccessFile(blockFile, "rw");
        FileChannel channel = raFile.getChannel();
        String badString = "BADBAD";
        int rand = random.nextInt((int) channel.size()/2);
        raFile.seek(rand);
        raFile.write(badString.getBytes());
        raFile.close();
      }
      // Read the file to trigger reportBadBlocks
      try {
        IOUtils.copyBytes(fs.open(file1), new IOUtils.NullOutputStream(), conf,
                          true);
      } catch (IOException ie) {
        // Ignore the checksum error; the read is only there to make the
        // client report the bad replica to the NameNode.
      }

      // Ask the NameNode for the block locations and wait until the
      // reported replica count settles at the replication factor.
      dfsClient = new DFSClient(new InetSocketAddress("localhost",
                                 cluster.getNameNodePort()), conf);
      blocks = dfsClient.getNamenode().
                 getBlockLocations(file1.toString(), 0, Long.MAX_VALUE);
      replicaCount = blocks.get(0).getLocations().length;
      while (replicaCount != factor) {
        try {
          Thread.sleep(100);
        } catch (InterruptedException ignore) {
        }
        blocks = dfsClient.getNamenode().
                  getBlockLocations(file1.toString(), 0, Long.MAX_VALUE);
        replicaCount = blocks.get(0).getLocations().length;
      }
      assertTrue(blocks.get(0).isCorrupt());

      // Check that fsck reports the corruption as well
      outStr = runFsck(conf, 1, true, "/");
      System.out.println(outStr);
      assertTrue(outStr.contains(NamenodeFsck.CORRUPT_STATUS));
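runFsck, used throughout testCorruptBlock, is the TestFsck helper that runs the fsck tool in-process and captures its output. A sketch of its usual shape, using the real org.apache.hadoop.hdfs.tools.DFSck tool and JUnit's assertEquals:

import java.io.ByteArrayOutputStream;
import java.io.PrintStream;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.tools.DFSck;
import org.apache.hadoop.util.ToolRunner;

// Run fsck in-process via the DFSck tool, capture its stdout, and
// optionally assert on the exit code.
static String runFsck(Configuration conf, int expectedErrCode,
    boolean checkErrorCode, String... path) throws Exception {
  ByteArrayOutputStream bStream = new ByteArrayOutputStream();
  PrintStream out = new PrintStream(bStream, true);
  int errCode = ToolRunner.run(new DFSck(conf, out), path);
  if (checkErrorCode) {
    assertEquals(expectedErrCode, errCode);
  }
  out.close();
  return bStream.toString();
}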

    final int fileLen = 1;
    DFSTestUtil.createFile(fs, fileName, fileLen, (short)1, 1L);
    DFSTestUtil.waitReplication(fs, fileName, (short)1);

    // Get the block belonging to the created file
    LocatedBlocks blocks = NameNodeAdapter.getBlockLocations(
        cluster.getNameNode(), fileName.toString(), 0, (long)fileLen);
    assertEquals("Should only find 1 block", blocks.locatedBlockCount(), 1);
    LocatedBlock block = blocks.get(0);

    // bring up a second datanode
    cluster.startDataNodes(conf, 1, true, null, null);
    cluster.waitActive();
    final int sndNode = 1;
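The snippet breaks off after the second datanode joins. This is not the snippet's actual continuation, but the typical next step in such a test is to raise the file's replication and wait for the new node to pick up a replica:

// Hedged sketch (not the real continuation): request a second replica and
// block until the NameNode reports it. setReplication and waitReplication
// are standard FileSystem and DFSTestUtil calls.
fs.setReplication(fileName, (short) 2);
DFSTestUtil.waitReplication(fs, fileName, (short) 2);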

      return;
    }
    long fileLen = file.getLen();
    // Get block locations without updating the file access time
    // and without block access tokens
    LocatedBlocks blocks;
    try {
      blocks = namenode.getNamesystem().getBlockLocations(path, 0,
          fileLen, false, false, false);
    } catch (FileNotFoundException fnfe) {
      blocks = null;
    }
    if (blocks == null) { // the file is deleted
      return;
    }
    isOpen = blocks.isUnderConstruction();
    if (isOpen && !showOpenFiles) {
      // Collect stats for open files so they appear in the summary under
      // the default options, even though the files themselves are not listed.
      res.totalOpenFilesSize += fileLen;
      res.totalOpenFilesBlocks += blocks.locatedBlockCount();
      res.totalOpenFiles++;
      return;
    }
    res.totalFiles++;
    res.totalSize += fileLen;
    res.totalBlocks += blocks.locatedBlockCount();
    if (showOpenFiles && isOpen) {
      out.print(path + " " + fileLen + " bytes, " +
        blocks.locatedBlockCount() + " block(s), OPENFORWRITE: ");
    } else if (showFiles) {
      out.print(path + " " + fileLen + " bytes, " +
        blocks.locatedBlockCount() + " block(s): ");
    } else {
      out.print('.');
    }
    if (res.totalFiles % 100 == 0) { out.println(); out.flush(); }
    int missing = 0;
    int corrupt = 0;
    long missize = 0;
    int underReplicatedPerFile = 0;
    int misReplicatedPerFile = 0;
    StringBuilder report = new StringBuilder();
    int i = 0;
    for (LocatedBlock lBlk : blocks.getLocatedBlocks()) {
      ExtendedBlock block = lBlk.getBlock();
      boolean isCorrupt = lBlk.isCorrupt();
      String blkName = block.toString();
      DatanodeInfo[] locs = lBlk.getLocations();
      NumberReplicas numberReplicas = namenode.getNamesystem()
          .getBlockManager().countNodes(block.getLocalBlock());
