Package org.apache.hadoop.hdfs.protocol

Examples of org.apache.hadoop.hdfs.protocol.LocatedBlocks
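
LocatedBlocks is what the namenode returns from ClientProtocol.getBlockLocations(src, offset, length): the file's length, whether it is still under construction, and an ordered list of LocatedBlock entries, each carrying the block itself, the datanodes holding replicas, and a corruption flag. A minimal sketch of the access pattern the snippets below share (the host, port, and path are illustrative):

    // Illustrative sketch: fetch and inspect a file's block list.
    DFSClient dfsClient = new DFSClient(
        new InetSocketAddress("localhost", nameNodePort), conf);
    LocatedBlocks blocks = dfsClient.namenode
        .getBlockLocations("/some/file", 0, Long.MAX_VALUE);
    System.out.println(blocks.locatedBlockCount() + " block(s), " +
        "underConstruction=" + blocks.isUnderConstruction());
    for (LocatedBlock lb : blocks.getLocatedBlocks()) {
      System.out.println(lb.getBlock() + " corrupt=" + lb.isCorrupt() +
          " replicas=" + lb.getLocations().length);
    }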


  /**
   * Tests that a datanode receiving a block during a replication transfer
   * detects the checksum error in the corrupt replica and reports the bad
   * block to the namenode.
   */
  public void testBadBlockReportOnTransfer() throws Exception {
    Configuration conf = new Configuration();
    FileSystem fs = null;
    DFSClient dfsClient = null;
    LocatedBlocks blocks = null;
    int replicaCount = 0;
    MiniDFSCluster cluster = new MiniDFSCluster(conf, 2, true, null);
    cluster.waitActive();
    fs = cluster.getFileSystem();
    dfsClient = new DFSClient(new InetSocketAddress("localhost",
                              cluster.getNameNodePort()), conf);
 
    // Create file with replication factor of 1
    Path file1 = new Path("/tmp/testBadBlockReportOnTransfer/file1");
    DFSTestUtil.createFile(fs, file1, 1024, (short)1, 0);
    DFSTestUtil.waitReplication(fs, file1, (short)1);
 
    // Corrupt the block belonging to the created file
    String block = DFSTestUtil.getFirstBlock(fs, file1).getBlockName();
    cluster.corruptBlockOnDataNodes(block);
 
    // Increase replication factor, this should invoke transfer request
    // Receiving datanode fails on checksum and reports it to namenode
    fs.setReplication(file1, (short)2);
 
    // Now get block details and check if the block is corrupt
    blocks = dfsClient.namenode.
              getBlockLocations(file1.toString(), 0, Long.MAX_VALUE);
    while (!blocks.get(0).isCorrupt()) {
      try {
        LOG.info("Waiting until block is marked as corrupt...");
        Thread.sleep(1000);
      } catch (InterruptedException ie) {
        // Ignore the interrupt and re-check the block state.
      }
      blocks = dfsClient.namenode.
                getBlockLocations(file1.toString(), 0, Long.MAX_VALUE);
    }
    replicaCount = blocks.get(0).getLocations().length;
    assertEquals(1, replicaCount);
    cluster.shutdown();
  }
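
A note on the polling style: the loop above retries getBlockLocations with no upper bound, so a regression hangs the test instead of failing it. A bounded variant is a small change (a sketch only; maxWaitMs is a hypothetical parameter, everything else comes from the test above):

    // Sketch: poll until the first block is flagged corrupt, but fail the
    // test after maxWaitMs instead of spinning forever.
    long deadline = System.currentTimeMillis() + maxWaitMs;
    LocatedBlocks blocks = dfsClient.namenode
        .getBlockLocations(file1.toString(), 0, Long.MAX_VALUE);
    while (!blocks.get(0).isCorrupt()) {
      assertTrue("block not marked corrupt after " + maxWaitMs + " ms",
                 System.currentTimeMillis() < deadline);
      Thread.sleep(1000);
      blocks = dfsClient.namenode
          .getBlockLocations(file1.toString(), 0, Long.MAX_VALUE);
    }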


    // Wait for all the blocks of the file to be replicated.
    LOG.info("Checking for block replication for " + filename);
    int iters = 0;
    while (true) {
      boolean replOk = true;
      LocatedBlocks blocks = namenode.getBlockLocations(filename, 0,
                                                        Long.MAX_VALUE);

      for (Iterator<LocatedBlock> iter = blocks.getLocatedBlocks().iterator();
           iter.hasNext();) {
        LocatedBlock block = iter.next();
        int actual = block.getLocations().length;
        if (actual < expected) {
          // The original guard here, "if (true || iters > 0)", is always
          // true, so the logging runs unconditionally. Loop body
          // reconstructed from context: log, flag, and keep polling.
          LOG.info("Not enough replicas for " + block.getBlock() +
                   " yet: expected " + expected + ", got " + actual + ".");
          replOk = false;
          break;
        }
      }
      if (replOk) {
        return;
      }
      iters++;
      // ... (sleep-and-retry tail of the loop omitted in the original listing)

  public void testCorruptBlock() throws Exception {
    Configuration conf = new Configuration();
    conf.setLong("dfs.blockreport.intervalMsec", 1000);
    FileSystem fs = null;
    DFSClient dfsClient = null;
    LocatedBlocks blocks = null;
    int replicaCount = 0;
    Random random = new Random();
    String outStr = null;

    MiniDFSCluster cluster = new MiniDFSCluster(conf, 3, true, null);
    cluster.waitActive();
    fs = cluster.getFileSystem();
    Path file1 = new Path("/testCorruptBlock");
    DFSTestUtil.createFile(fs, file1, 1024, (short)3, 0);
    // Wait until file replication has completed
    DFSTestUtil.waitReplication(fs, file1, (short)3);
    String block = DFSTestUtil.getFirstBlock(fs, file1).getBlockName();

    // Make sure filesystem is in healthy state
    outStr = runFsck(conf, 0, true, "/");
    System.out.println(outStr);
    assertTrue(outStr.contains("HEALTHY"));
   
    // Corrupt the replicas on disk. A 3-datanode MiniDFSCluster keeps two
    // storage directories per node, data1 through data6.
    File baseDir = new File(System.getProperty("test.build.data"), "dfs/data");
    for (int i = 0; i < 6; i++) {
      File blockFile = new File(baseDir, "data" + (i+1) + "/current/" +
                                block);
      if (blockFile.exists()) {
        RandomAccessFile raFile = new RandomAccessFile(blockFile, "rw");
        FileChannel channel = raFile.getChannel();
        // Overwrite a few bytes somewhere in the first half of the block
        // file so the stored checksum no longer matches the data.
        String badString = "BADBAD";
        int rand = random.nextInt((int) channel.size() / 2);
        raFile.seek(rand);
        raFile.write(badString.getBytes());
        raFile.close();
      }
    }
    // Read the file to trigger reportBadBlocks
    try {
      IOUtils.copyBytes(fs.open(file1), new IOUtils.NullOutputStream(), conf,
                        true);
    } catch (IOException ie) {
      // Ignore exception
    }

    dfsClient = new DFSClient(new InetSocketAddress("localhost",
                               cluster.getNameNodePort()), conf);
    blocks = dfsClient.namenode.
               getBlockLocations(file1.toString(), 0, Long.MAX_VALUE);
    replicaCount = blocks.get(0).getLocations().length;
    while (replicaCount != 3) {
      try {
        Thread.sleep(100);
      } catch (InterruptedException ignore) {
      }
      blocks = dfsClient.namenode.
                getBlockLocations(file1.toString(), 0, Long.MAX_VALUE);
      replicaCount = blocks.get(0).getLocations().length;
    }
    assertTrue(blocks.get(0).isCorrupt());

    // Check if fsck reports the same
    outStr = runFsck(conf, 1, true, "/");
    System.out.println(outStr);
    assertTrue(outStr.contains("CORRUPT"));
    // ... (remainder of the test omitted)

    // (fragment begins mid-method: the directory branch above recurses into
    // each child via check(); the code below handles a regular file)
        check(files[i], res);
      }
      return;
    }
    long fileLen = file.getLen();
    LocatedBlocks blocks = nn.namesystem.getBlockLocations(path, 0, fileLen);
    if (blocks == null) { // the file is deleted
      return;
    }
    isOpen = blocks.isUnderConstruction();
    if (isOpen && !showOpenFiles) {
      // We collect these stats about open files to report with default options
      res.totalOpenFilesSize += fileLen;
      res.totalOpenFilesBlocks += blocks.locatedBlockCount();
      res.totalOpenFiles++;
      return;
    }
    res.totalFiles++;
    res.totalSize += fileLen;
    res.totalBlocks += blocks.locatedBlockCount();
    if (showOpenFiles && isOpen) {
      out.print(path + " " + fileLen + " bytes, " +
        blocks.locatedBlockCount() + " block(s), OPENFORWRITE: ");
    } else if (showFiles) {
      out.print(path + " " + fileLen + " bytes, " +
        blocks.locatedBlockCount() + " block(s): ");
    } else {
      out.print('.');
    }
    if (res.totalFiles % 100 == 0) { out.println(); out.flush(); }
    int missing = 0;
    int corrupt = 0;
    long missize = 0;
    int underReplicatedPerFile = 0;
    int misReplicatedPerFile = 0;
    StringBuffer report = new StringBuffer();
    int i = 0;
    for (LocatedBlock lBlk : blocks.getLocatedBlocks()) {
      Block block = lBlk.getBlock();
      boolean isCorrupt = lBlk.isCorrupt();
      String blkName = block.toString();
      DatanodeInfo[] locs = lBlk.getLocations();
      res.totalReplicas += locs.length;
      // ... (rest of the per-block accounting omitted)

  /**
   * Removes a file block in the specified stripe.
   */
  private void removeFileBlock(Path filePath, int stripe, int blockInStripe)
    throws IOException {
    LocatedBlocks fileBlocks = dfs.getClient().namenode.
      getBlockLocations(filePath.toString(), 0, FILE_BLOCKS * BLOCK_SIZE);
    if (fileBlocks.locatedBlockCount() != FILE_BLOCKS) {
      throw new IOException("expected " + FILE_BLOCKS +
                            " file blocks but found " +
                            fileBlocks.locatedBlockCount());
    }
    if (blockInStripe >= STRIPE_BLOCKS) {
      throw new IOException("blockInStripe is " + blockInStripe +
                            " but must be smaller than " + STRIPE_BLOCKS);
    }
    LocatedBlock block = fileBlocks.get(stripe * STRIPE_BLOCKS + blockInStripe);
    removeAndReportBlock(dfs, filePath, block);
    LOG.info("removed file " + filePath.toString() + " block " +
             stripe * STRIPE_BLOCKS + " in stripe " + stripe);
  }
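
For orientation: the removed block's index in the flat block list is stripe * STRIPE_BLOCKS + blockInStripe, so with a hypothetical STRIPE_BLOCKS of 4, removing blockInStripe 2 of stripe 3 targets block index 3 * 4 + 2 = 14.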

    if (parityBlockSize != BLOCK_SIZE) {
      throw new IOException("file block size is " + BLOCK_SIZE +
                            " but parity file block size is " +
                            parityBlockSize);
    }
    LocatedBlocks parityFileBlocks = parityDFS.getClient().namenode.
      getBlockLocations(parityPathStr, 0, parityFileLength);
    if (blockInStripe >= PARITY_BLOCKS) {
      throw new IOException("blockInStripe is " + blockInStripe +
                            " but must be smaller than " + PARITY_BLOCKS);
    }
    LocatedBlock parityFileBlock =
        parityFileBlocks.get(stripe * PARITY_BLOCKS + blockInStripe);
    removeAndReportBlock(parityDFS, new Path(parityPathStr), parityFileBlock);
    LOG.info("removed parity file block/stripe " + stripe + " for " + filePath.toString());

  }

  @Override
  public FSDataInputStream open(Path f, int bufferSize) throws IOException {
    // We want to use RAID logic only on instances of DistributedFileSystem.
    if (fs instanceof DistributedFileSystem) {
      DistributedFileSystem underlyingDfs = (DistributedFileSystem) fs;
      LocatedBlocks lbs =
          underlyingDfs.getLocatedBlocks(f, 0L, Long.MAX_VALUE);
      if (lbs != null) {
        // Use underlying filesystem if the file is under construction.
        if (!lbs.isUnderConstruction()) {
          // Use underlying filesystem if file length is 0.
          final long fileSize = getFileSize(lbs);
          if (fileSize > 0) {
            return new ExtFSDataInputStream(conf, this, f,
              fileSize, getBlockSize(lbs), bufferSize);
          // ... (remainder of open() omitted)
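
The fragment leans on two helpers not shown here, getFileSize(lbs) and getBlockSize(lbs). LocatedBlocks carries the file length itself, so a minimal getFileSize could simply delegate (a sketch; the class's real helper may differ):

    // Hypothetical helper: LocatedBlocks#getFileLength() returns the file
    // length the namenode reported alongside the block list.
    private static long getFileSize(LocatedBlocks lbs) {
      return lbs.getFileLength();
    }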

      DFSTestUtil.createFile(fs, src, 20, (short)2, 0L);
      DFSTestUtil.createFile(fs, parity, 11, (short)2, 0L);
      DFSTestUtil.waitReplication(fs, src, (short)2);
      DFSTestUtil.waitReplication(fs, parity, (short)2);

      LocatedBlocks srcLbs, parityLbs;
      List<BlockInfo> srcInfos, parityInfos;
      srcLbs = namenode.getBlockLocations(src.toString(), 4, 10);
      srcInfos = placementMonitor.getBlockInfos(fs, src, 4, 10);
      parityLbs = namenode.getBlockLocations(parity.toString(), 3, 7);
      parityInfos = placementMonitor.getBlockInfos(fs, parity, 3, 7);

      Assert.assertEquals(10, srcLbs.getLocatedBlocks().size());
      Assert.assertEquals(7, parityLbs.getLocatedBlocks().size());
      Assert.assertEquals(10, srcInfos.size());
      Assert.assertEquals(7, parityInfos.size());
     
      BlockAndDatanodeResolver resolver =
          new BlockAndDatanodeResolver(src, fs, parity, fs);
      for (int i = 0; i < srcInfos.size(); ++i) {
        LocatedBlock lb = resolver.getLocatedBlock(srcInfos.get(i));
        Assert.assertEquals(srcLbs.get(i).getBlock(), lb.getBlock());
        for (String nodeName : srcInfos.get(i).getNames()) {
          DatanodeInfo node = resolver.getDatanodeInfo(nodeName);
          Assert.assertEquals(node.getName(), nodeName);
        }
      }
      // ... (remainder of the test omitted)

      cnode.stop();
      cnode.join();

      FileStatus file1Stat = fileSys.getFileStatus(file1);
      FileStatus file2Stat = fileSys.getFileStatus(file2);
      DistributedFileSystem dfs = (DistributedFileSystem)fileSys;
      LocatedBlocks file1Loc =
        RaidDFSUtil.getBlockLocations(dfs, file1.toUri().getPath(),
                                      0, file1Stat.getLen());
      LocatedBlocks file2Loc =
        RaidDFSUtil.getBlockLocations(dfs, file2.toUri().getPath(),
                                      0, file2Stat.getLen());

      String[] corruptFiles = DFSUtil.getCorruptFiles(dfs);
      assertEquals("no corrupt files expected", 0, corruptFiles.length);
      assertEquals("filesFixed() should return 0 before fixing files",
                   0, cnode.blockIntegrityMonitor.getNumFilesFixed());

      // corrupt file1
      int[] corruptBlockIdxs = new int[]{0, 1, 2, 3, 4, 6};
      for (int idx: corruptBlockIdxs)
        corruptBlock(file1Loc.get(idx).getBlock().getBlockName(), dfsCluster);
      RaidDFSUtil.reportCorruptBlocks(dfs, file1, corruptBlockIdxs, blockSize);
      cnode = RaidNode.createRaidNode(null, localConf);
      Thread.sleep(3000);
      Map<String, Long> result = cnode.getCorruptFileCounterMap();
      assertEquals("We expect 1 corrupt files",
          result.get("/user/dhruba/raidtest"), new Long(1L));
      assertEquals("We expect 0 corrupt files",
          result.get("/user/dhruba1"), new Long(0L));
      // corrupt file2
      for (int idx: corruptBlockIdxs)
        corruptBlock(file2Loc.get(idx).getBlock().getBlockName(), dfsCluster);
      RaidDFSUtil.reportCorruptBlocks(dfs, file2, corruptBlockIdxs, blockSize);
      Thread.sleep(3000);
      result = cnode.getCorruptFileCounterMap();
      assertEquals("We expect 2 corrupt files",
          result.get("/user/dhruba/raidtest"), new Long(2L));
      // ... (remainder of the test omitted)

    // generate the parity files.
    doRaid(srcPath, codec);

    FileStatus file1Stat = fileSys.getFileStatus(srcPath);
    long length = file1Stat.getLen();
    LocatedBlocks file1Loc =
        RaidDFSUtil.getBlockLocations((DistributedFileSystem)fileSys,
            srcPath.toUri().getPath(),
            0, length);
    // Corrupt the selected blocks of the source file.
    for (int idx: corruptBlockIdxs) {
      corruptBlock(file1Loc.get(idx).getBlock().getBlockName(),
                                dfs);
    }
    RaidDFSUtil.reportCorruptBlocks((DistributedFileSystem)fileSys, srcPath,
                         corruptBlockIdxs, blockSize);
   
    // ... (remainder omitted)
