Package org.apache.hadoop.hdfs.protocol

Examples of org.apache.hadoop.hdfs.protocol.LocatedBlocks
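All of the snippets below follow the same basic pattern: ask the name node for a file's LocatedBlocks, then index into the returned list to inspect, corrupt, or remove individual blocks. A minimal sketch of that pattern (assuming dfs is an open DistributedFileSystem and file an existing Path; this mirrors the calls in the snippets rather than reproducing any one of them):

      import org.apache.hadoop.fs.FileStatus;
      import org.apache.hadoop.fs.Path;
      import org.apache.hadoop.hdfs.DistributedFileSystem;
      import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
      import org.apache.hadoop.hdfs.protocol.LocatedBlock;
      import org.apache.hadoop.hdfs.protocol.LocatedBlocks;

      FileStatus stat = dfs.getFileStatus(file);
      LocatedBlocks blocks = dfs.getClient().namenode.getBlockLocations(
          file.toUri().getPath(), 0, stat.getLen());

      System.out.println("blocks: " + blocks.locatedBlockCount());
      for (LocatedBlock lb : blocks.getLocatedBlocks()) {
        // each LocatedBlock pairs one block with the datanodes holding its replicas
        DatanodeInfo[] locs = lb.getLocations();
        System.out.println(lb.getBlock().getBlockName() +
            " on " + locs.length + " datanode(s)");
      }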


      // From a RaidNode test: record the parity file's CRC and block
      // locations, and check that no files are corrupt yet.
      long parityCRC = RaidDFSUtil.getCRC(fileSys, parityFile);

      FileStatus parityStat = fileSys.getFileStatus(parityFile);
      DistributedFileSystem dfs = (DistributedFileSystem)fileSys;
      LocatedBlocks locs = RaidDFSUtil.getBlockLocations(
        dfs, parityFile.toUri().getPath(), 0, parityStat.getLen());
      String[] corruptFiles = DFSUtil.getCorruptFiles(dfs);
     
      assertEquals("no corrupt files expected", 0, corruptFiles.length);
      assertEquals("filesFixed() should return 0 before fixing files",
                   0, cnode.blockIntegrityMonitor.getNumFilesFixed());

      // Corrupt parity blocks for different stripes.
      int[] corruptBlockIdxs = new int[]{0, 1, 2};
      for (int idx: corruptBlockIdxs)
        corruptBlock(locs.get(idx).getBlock().getBlockName(), dfsCluster);
      RaidDFSUtil.reportCorruptBlocks(dfs, parityFile, corruptBlockIdxs,
          2*blockSize);

      corruptFiles = DFSUtil.getCorruptFiles(dfs);
      assertEquals("file not corrupted",
View Full Code Here


  // Fragment of a corrupt-and-report helper (the method name and the
  // MiniDFSCluster parameter are reconstructed here, not from the source):
  private void corruptBlocks(MiniDFSCluster cluster,
                             FileStatus fsStat,
                             int[] blockNums)
    throws IOException {
    DistributedFileSystem blockDfs = (DistributedFileSystem)cluster.getFileSystem();
    Path filePath = fsStat.getPath();
    LocatedBlocks lbs = blockDfs.getClient().namenode.
        getBlockLocations(filePath.toUri().getPath(), 0, fsStat.getLen());
    for (int blockNum: blockNums) {
      assert blockNum < lbs.getLocatedBlocks().size();
      LocatedBlock block = lbs.get(blockNum);
      TestRaidDfs.corruptBlock(filePath, block.getBlock(),
          NUM_DATANODES, true, cluster);
      // report deleted block to the name node
      LocatedBlock[] toReport = { block };
      blockDfs.getClient().namenode.reportBadBlocks(toReport);
    }
  }

  /**
   * Removes a file block in the specified stripe.
   */
  private void removeFileBlock(Path filePath, int stripe, int blockInStripe)
    throws IOException {
    LocatedBlocks fileBlocks = dfs.getClient().namenode.
      getBlockLocations(filePath.toString(), 0, FILE_BLOCKS * BLOCK_SIZE);
    if (fileBlocks.locatedBlockCount() != FILE_BLOCKS) {
      throw new IOException("expected " + FILE_BLOCKS +
                            " file blocks but found " +
                            fileBlocks.locatedBlockCount());
    }
    if (blockInStripe >= STRIPE_BLOCKS) {
      throw new IOException("blockInStripe is " + blockInStripe +
                            " but must be smaller than " + STRIPE_BLOCKS);
    }
    LocatedBlock block = fileBlocks.get(stripe * STRIPE_BLOCKS + blockInStripe);
    removeAndReportBlock(dfs, filePath, block);
    LOG.info("removed file " + filePath.toString() + " block " +
             stripe * STRIPE_BLOCKS + " in stripe " + stripe);
  }
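
The removeAndReportBlock helper called here (and in the snippets below) is not shown on this page. A plausible sketch, assuming the TestRaidDfs utilities and the cluster / NUM_DATANODES fields used elsewhere on this page; the actual implementation may differ:

  private void removeAndReportBlock(DistributedFileSystem blockDfs,
                                    Path filePath,
                                    LocatedBlock block)
    throws IOException {
    // delete the block's replicas on the datanodes ...
    TestRaidDfs.corruptBlock(filePath, block.getBlock(),
        NUM_DATANODES, true, cluster);
    // ... then report it so the name node marks the block as missing/corrupt
    LocatedBlock[] toReport = { block };
    blockDfs.getClient().namenode.reportBadBlocks(toReport);
  }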

    // Fragment of a parity-block removal helper: sanity-check the parity
    // file's block size, then remove and report one parity block per stripe.
    if (parityBlockSize != BLOCK_SIZE) {
      throw new IOException("file block size is " + BLOCK_SIZE +
                            " but parity file block size is " +
                            parityBlockSize);
    }
    LocatedBlocks parityFileBlocks = parityDFS.getClient().namenode.
      getBlockLocations(parityPathStr, 0, parityFileLength);
    if (parityFileBlocks.locatedBlockCount() != parityFileLengthInBlocks) {
      throw new IOException("expected " + parityFileLengthInBlocks +
                            " parity file blocks but got " +
                            parityFileBlocks.locatedBlockCount() +
                            " blocks");
    }
    LocatedBlock parityFileBlock = parityFileBlocks.get(stripe);
    removeAndReportBlock(parityDFS, new Path(parityPathStr), parityFileBlock);
    LOG.info("removed parity file block/stripe " + stripe +
             " for " + filePath.toString());

  }

    // Fragment that removes one block from the first "part-" file of a HAR
    // archive and reports it to the name node.
    boolean deleted = false;
   
    for (FileStatus f: listPaths) {
      if (f.getPath().getName().startsWith("part-")) {
        final Path partPath = new Path(f.getPath().toUri().getPath());
        final LocatedBlocks partBlocks  = dfs.getClient().namenode.
          getBlockLocations(partPath.toString(),
                            0,
                            f.getLen());
       
        if (partBlocks.locatedBlockCount() <= block) {
          throw new IOException("invalid har block " + block);
        }

        final LocatedBlock partBlock = partBlocks.get(block);
        removeAndReportBlock(dfs, partPath, partBlock);
        LOG.info("removed block " + block + "/" +
                 partBlocks.locatedBlockCount() +
                 " of file " + partPath.toString() +
                 " block size " + partBlock.getBlockSize());
        deleted = true;
        break;
      }

      // From a block-fixer test: stop the RaidNode, then corrupt and report
      // blocks in different stripes of the source file.
      cnode.stop();
      cnode.join();
      cnode = null;

      FileStatus srcStat = fileSys.getFileStatus(file1);
      LocatedBlocks locations = getBlockLocations(file1, srcStat.getLen());

      DistributedFileSystem dfs = (DistributedFileSystem)fileSys;
      ClientProtocol namenode = dfs.getClient().namenode;

      String[] corruptFiles = DFSUtil.getCorruptFiles(dfs);
      assertEquals(0, corruptFiles.length);

      // Corrupt blocks in two different stripes. We can fix them.
      TestRaidDfs.corruptBlock(file1, locations.get(0).getBlock(),
               NUM_DATANODES, true, dfsCluster); // delete block
      TestRaidDfs.corruptBlock(file1, locations.get(4).getBlock(),
               NUM_DATANODES, false, dfsCluster); // corrupt block
      TestRaidDfs.corruptBlock(file1, locations.get(6).getBlock(),
               NUM_DATANODES, true, dfsCluster); // delete last (partial) block
      LocatedBlock[] toReport = new LocatedBlock[3];
      toReport[0] = locations.get(0);
      toReport[1] = locations.get(4);
      toReport[2] = locations.get(6);
      namenode.reportBadBlocks(toReport);

      corruptFiles = DFSUtil.getCorruptFiles(dfs);
      assertEquals(1, corruptFiles.length);
      assertEquals(file1.toString(), corruptFiles[0]);
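
The getBlockLocations(file1, srcStat.getLen()) call above is a test-local convenience wrapper, not a FileSystem method. A plausible sketch, assuming the fileSys field used throughout these tests:

  private LocatedBlocks getBlockLocations(Path file, long length)
    throws IOException {
    DistributedFileSystem dfs = (DistributedFileSystem) fileSys;
    return dfs.getClient().namenode.getBlockLocations(
        file.toUri().getPath(), 0, length);
  }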

     
      // Depending on fixSource, corrupt blocks in either the source file
      // or its parity file, then report them to the name node.
      Path corruptFile;
      int [] corruptBlockIdxs;
      if (fixSource) {
        stat = fileSys.getFileStatus(file1);
        LocatedBlocks locs = RaidDFSUtil.getBlockLocations(
            dfs, file1.toUri().getPath(), 0, stat.getLen());
        // Corrupt blocks in two different stripes. We can fix them.
        corruptBlockIdxs = new int[]{1, 4, 6};
        for (int idx: corruptBlockIdxs) {
          TestBlockFixer.corruptBlock(locs.get(idx).getBlock().getBlockName(), dfsCluster);
        }
        RaidDFSUtil.reportCorruptBlocks(dfs, file1, corruptBlockIdxs, blockSize);
        corruptFile = file1;
      } else {
        crc1 = RaidDFSUtil.getCRC(fileSys, parity);
        stat = fileSys.getFileStatus(parity);
        LocatedBlocks locs = RaidDFSUtil.getBlockLocations(
            dfs, parity.toUri().getPath(), 0, stat.getLen());
        corruptBlockIdxs = new int[] {0, 1, 2};
        for (int idx : corruptBlockIdxs) {
          TestBlockFixer.corruptBlock(locs.get(idx).getBlock().getBlockName(), dfsCluster);
        }
        RaidDFSUtil.reportCorruptBlocks(dfs, parity, corruptBlockIdxs, blockSize);
        corruptFile = parity;
      }
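
RaidDFSUtil.reportCorruptBlocks, used in both branches above, is a small utility whose body is not shown on this page. A plausible sketch consistent with how it is called here (an assumption, not the verbatim utility; the blockSize argument is accepted only to match the call sites):

  public static void reportCorruptBlocks(DistributedFileSystem dfs, Path file,
      int[] idxs, long blockSize) throws IOException {
    FileStatus stat = dfs.getFileStatus(file);
    LocatedBlocks locs = dfs.getClient().namenode.getBlockLocations(
        file.toUri().getPath(), 0, stat.getLen());
    // report each chosen block index as bad
    LocatedBlock[] toReport = new LocatedBlock[idxs.length];
    for (int i = 0; i < idxs.length; i++) {
      toReport[i] = locs.get(idxs[i]);
    }
    dfs.getClient().namenode.reportBadBlocks(toReport);
  }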
     

      // Fragment from a loop that corrupts one chosen block per affected
      // source file and optionally reports it to the name node.
      bi = blocks.get(blockIndex);
      FileStatus srcFileFs = lfs.get(bi.fileIdx);
      Path srcFile = srcFileFs.getPath();
      LOG.info("Corrupt block " + bi.blockId + " of file " +
        srcFile);
      LocatedBlocks locations = RaidDFSUtil.getBlockLocations(
        (DistributedFileSystem)fileSys, srcFile.toUri().getPath(),
        0L, srcFileFs.getLen());
      TestRaidDfs.corruptBlock(srcFile,
          locations.get(bi.blockId).getBlock(),
          NUM_DATANODES, true, cluster);
      if (reportBadBlocks) {
        cluster.getNameNode().reportBadBlocks(new LocatedBlock[]
            {locations.get(bi.blockId)});
      }
      affectedFiles.add(bi.fileIdx);
    }
    // validate files
    if (validate) {

  // Creates a file left in the replica-being-written (RBW) state and
  // returns how many blocks it has.
  private int initializeTest(String testName) throws IOException {
    String fileName = testName;
    createRBWFile(fileName);
    // Verify we have 1 RBW block.
    AvatarNode avatar = cluster.getPrimaryAvatar(0).avatar;
    LocatedBlocks lbks = avatar.namesystem.getBlockLocations(fileName, 0,
        Long.MAX_VALUE);
    int blocksBefore = lbks.locatedBlockCount();
    for (LocatedBlock lbk : lbks.getLocatedBlocks()) {
      DatanodeInfo[] locs = lbk.getLocations();
      assertNotNull(locs);
      assertTrue(locs.length != 0);
    }
    return blocksBefore;
  }

  private void verifyResults(int blocksBefore, String fileName)
    throws IOException {
    // Verify we have RBWs after restart.
    AvatarNode avatarAfter = cluster.getPrimaryAvatar(0).avatar;
    LocatedBlocks lbks = avatarAfter.namesystem
        .getBlockLocations(fileName, 0,
        Long.MAX_VALUE);
    long blocksAfter = lbks.locatedBlockCount();

    System.out.println("blocksBefore : " + blocksBefore + " blocksAfter : "
        + blocksAfter);

    assertEquals(blocksBefore, blocksAfter);
    for (LocatedBlock lbk : lbks.getLocatedBlocks()) {
      DatanodeInfo[] locs = lbk.getLocations();
      assertNotNull(locs);
      assertTrue(locs.length != 0);
    }
  }
