Examples of FSDataset

The snippets below, drawn from HDFS test code, show how org.apache.hadoop.hdfs.server.datanode.FSDataset maps a block to its on-disk replica file so tests can delete, truncate, corrupt, or hardlink that file directly.

Examples of org.apache.hadoop.hdfs.server.datanode.FSDataset

    assertEquals(len1, lb.getBlockSize());

    DatanodeInfo[] datanodeinfos = lb.getLocations();
    assertEquals(repl, datanodeinfos.length);
    final DataNode dn = cluster.getDataNode(datanodeinfos[0].getIpcPort());
    final FSDataset data = (FSDataset)dn.getFSDataset();
    // Open the replica's on-disk file directly and truncate it to zero
    // bytes, simulating a DataNode that lost the block's data.
    final RandomAccessFile raf = new RandomAccessFile(data.getBlockFile(blk), "rw");
    AppendTestUtil.LOG.info("dn=" + dn + ", blk=" + blk + " (length=" + blk.getNumBytes() + ")");
    assertEquals(len1, raf.length());
    raf.setLength(0);
    raf.close();
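For reuse across append tests, the truncation above could be pulled into a small helper. A minimal sketch, assuming the pre-federation FSDataset API shown above, where getBlockFile(Block) resolves a replica's local file; the class and method names are hypothetical:

    import java.io.IOException;
    import java.io.RandomAccessFile;

    import org.apache.hadoop.hdfs.protocol.Block;
    import org.apache.hadoop.hdfs.server.datanode.DataNode;
    import org.apache.hadoop.hdfs.server.datanode.FSDataset;

    class ReplicaTestUtil {
      /** Truncate the on-disk replica of blk on dn, simulating lost data. */
      static void truncateReplica(DataNode dn, Block blk) throws IOException {
        final FSDataset data = (FSDataset) dn.getFSDataset();
        final RandomAccessFile raf =
            new RandomAccessFile(data.getBlockFile(blk), "rw");
        try {
          raf.setLength(0); // drop the replica's bytes; the meta file is untouched
        } finally {
          raf.close();
        }
      }
    }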

Examples of org.apache.hadoop.hdfs.server.datanode.FSDataset

                  dn.length == 1);

      LocatedBlocks locations = client.namenode.getBlockLocations(
                                  file1.toString(), 0, Long.MAX_VALUE);
      List<LocatedBlock> blocks = locations.getLocatedBlocks();
      FSDataset dataset = (FSDataset) dn[0].data;

      //
      // Create hard links for a few of the blocks
      //
      for (int i = 0; i < blocks.size(); i += 2) {
        Block b = blocks.get(i).getBlock();
        File f = dataset.getFile(b);
        File link = new File(f.toString() + ".link");
        System.out.println("Creating hardlink for File " + f + " to " + link);
        HardLink.createHardLink(f, link);
      }
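To confirm the links took effect, a hedged follow-up check, assuming HardLink.getLinkCount(File) (a static helper in org.apache.hadoop.fs.HardLink alongside createHardLink) and reusing the names from the snippet above:

      // Each hardlinked replica should now have two directory entries.
      for (int i = 0; i < blocks.size(); i += 2) {
        File f = dataset.getFile(blocks.get(i).getBlock());
        assertEquals(2, HardLink.getLinkCount(f));
      }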

Examples of org.apache.hadoop.hdfs.server.datanode.FSDataset

    Assert.assertEquals(blocksize, lb.getBlockSize());
    final DatanodeInfo[] datanodeinfos = lb.getLocations();
    Assert.assertEquals(DATANODE_NUM, datanodeinfos.length);
    final DataNode dn = cluster.getDataNode(datanodeinfos[0].getIpcPort());
    LOG.info("dn=" + dn + ", blk=" + blk + " (length=" + blk.getNumBytes() + ")");
    final FSDataset data = (FSDataset)dn.getFSDataset();
    final File blkfile = data.getBlockFile(blk);
    Assert.assertTrue(blkfile.delete());

    // Read the file again over HFTP; the test expects an exception now that
    // the replica's file has been deleted.
    LOG.info("hftpfs.getUri() = " + hftpfs.getUri());
    final ContentSummary cs = hftpfs.getContentSummary(filepath);
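The snippet is cut off here; the failing read presumably follows. A hedged sketch of what such a check could look like, reusing hftpfs, filepath, and LOG from above:

    // Per the comment above, reading the data over HFTP should now fail.
    try {
      final InputStream in = hftpfs.open(filepath);
      in.read(new byte[1024]);
      Assert.fail("Expected an IOException reading a block whose replica file is gone");
    } catch (IOException ioe) {
      LOG.info("Got the expected exception", ioe);
    }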

Examples of org.apache.hadoop.hdfs.server.datanode.FSDataset

      assertEquals(1, locations.locatedBlockCount());
      LocatedBlock locatedblock = locations.getLocatedBlocks().get(0);
      int successcount = 0;
      for(DatanodeInfo datanodeinfo: locatedblock.getLocations()) {
        DataNode datanode = cluster.getDataNode(datanodeinfo.ipcPort);
        FSDataset dataset = (FSDataset)datanode.data;
        ExtendedBlock blk = locatedblock.getBlock();
        // Federated API: stored-block and block-file lookups are keyed by
        // the block-pool id as well as the block id.
        Block b = dataset.getStoredBlock(blk.getBlockPoolId(), blk.getBlockId());
        File blockfile = dataset.findBlockFile(blk.getBlockPoolId(), b.getBlockId());
        System.out.println("blockfile=" + blockfile);
        if (blockfile != null) {
          BufferedReader in = new BufferedReader(new FileReader(blockfile));
          assertEquals("something", in.readLine());
          in.close();
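The two-step lookup above can be folded into one helper. A minimal sketch, assuming the federated FSDataset API used in this snippet, with both lookups keyed by the block-pool id; the method name is hypothetical:

      static File replicaFile(FSDataset dataset, ExtendedBlock blk)
          throws IOException {
        // Map the client-visible ExtendedBlock to the stored block, then to
        // the replica's file; either lookup may return null.
        Block stored =
            dataset.getStoredBlock(blk.getBlockPoolId(), blk.getBlockId());
        return stored == null ? null
            : dataset.findBlockFile(blk.getBlockPoolId(), stored.getBlockId());
      }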

Examples of org.apache.hadoop.hdfs.server.datanode.FSDataset

  /**
   * Corrupt the on-disk replica of the given block on the given DataNode
   * by overwriting it with a deterministic byte pattern.
   * @throws FileNotFoundException
   * @throws IOException
   */
  private static void corruptBlock(final ExtendedBlock block, final DataNode dn)
      throws FileNotFoundException, IOException {
    final FSDataset data = (FSDataset) dn.getFSDataset();
    final RandomAccessFile raFile = new RandomAccessFile(
        data.getBlockFile(block), "rw");
    // Fill one block's worth of bytes with the pattern 0, 1, 2, ... and
    // write it over the replica, leaving the checksum file stale.
    final byte[] bytes = new byte[(int) BLOCK_SIZE];
    for (int i = 0; i < BLOCK_SIZE; i++) {
      bytes[i] = (byte) (i);
    }
    raFile.write(bytes);
    raFile.close();
  }
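A hypothetical call site, assuming a MiniDFSCluster test where DFSTestUtil.getFirstBlock(fs, filepath) resolves the file's first ExtendedBlock; fs, filepath, and cluster are names assumed from the surrounding test:

    // Corrupt the replica on the first DataNode; a later read of that
    // replica should then fail checksum verification.
    final ExtendedBlock blk = DFSTestUtil.getFirstBlock(fs, filepath);
    corruptBlock(blk, cluster.getDataNodes().get(0));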

Examples of org.apache.hadoop.hdfs.server.datanode.FSDataset

                  dn.length == 1);

      LocatedBlocks locations = client.getNamenode().getBlockLocations(
                                  file1.toString(), 0, Long.MAX_VALUE);
      List<LocatedBlock> blocks = locations.getLocatedBlocks();
      FSDataset dataset = (FSDataset) dn[0].data;

      //
      // Create hard links for a few of the blocks
      //
      for (int i = 0; i < blocks.size(); i += 2) {
        ExtendedBlock b = blocks.get(i).getBlock();
        File f = dataset.getFile(b.getBlockPoolId(), b.getLocalBlock());
        File link = new File(f.toString() + ".link");
        System.out.println("Creating hardlink for File " + f + " to " + link);
        HardLink.createHardLink(f, link);
      }

      //
      // Detach all blocks. This should remove hardlinks (if any)
      //
      for (int i = 0; i < blocks.size(); i++) {
        ExtendedBlock b = blocks.get(i).getBlock();
        System.out.println("testCopyOnWrite detaching block " + b);
        assertTrue("Detaching block " + b + " should have returned true",
            dataset.unlinkBlock(b, 1));
      }

      //
      // The blocks were already detached above, so a second unlinkBlock
      // should return false.
      //
      for (int i = 0; i < blocks.size(); i++) {
        ExtendedBlock b = blocks.get(i).getBlock();
        System.out.println("testCopyOnWrite detaching block " + b);
        assertFalse("Detaching block " + b + " should have returned false",
            dataset.unlinkBlock(b, 1));
      }

    } finally {
      fs.close();
      cluster.shutdown();
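unlinkBlock's copy-on-write detach gives each replica a private copy of its data, so the .link files created earlier no longer share an inode with it. A hedged way to assert that, assuming HardLink.getLinkCount(File) from org.apache.hadoop.fs.HardLink and the names from the snippet:

      // After detaching, each previously hardlinked replica file should be
      // the only link to its inode (the .link file kept the old one).
      for (int i = 0; i < blocks.size(); i += 2) {
        ExtendedBlock b = blocks.get(i).getBlock();
        File f = dataset.getFile(b.getBlockPoolId(), b.getLocalBlock());
        assertEquals(1, HardLink.getLinkCount(f));
      }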

Examples of org.apache.hadoop.hdfs.server.datanode.FSDataset

    // Collect the on-disk file of every block reported by every DataNode
    // in the cluster.
    List<File> files = new ArrayList<File>();
    List<DataNode> datanodes = cluster.getDataNodes();
    String poolId = cluster.getNamesystem().getBlockPoolId();
    Iterable<Block>[] blocks = cluster.getAllBlockReports(poolId);
    for(int i = 0; i < blocks.length; i++) {
      FSDataset ds = (FSDataset)datanodes.get(i).getFSDataset();
      for(Block b : blocks[i]) {
        files.add(ds.getBlockFile(poolId, b));
      }
    }
    return files;
  }
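A hypothetical use of this helper (its signature is cut off above, so the name getBlockFiles is assumed): corrupt every replica in the cluster to provoke checksum failures on later reads.

    for (File f : getBlockFiles(cluster)) {
      RandomAccessFile raf = new RandomAccessFile(f, "rw");
      try {
        int first = raf.read();
        if (first >= 0) {          // skip zero-length replicas
          raf.seek(0);
          raf.write(first ^ 0xff); // invert the first byte of the data
        }
      } finally {
        raf.close();
      }
    }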

Examples of org.apache.hadoop.hdfs.server.datanode.FSDataset

      assertEquals(1, locations.locatedBlockCount());
      LocatedBlock locatedblock = locations.getLocatedBlocks().get(0);
      int successcount = 0;
      for(DatanodeInfo datanodeinfo: locatedblock.getLocations()) {
        DataNode datanode = cluster.getDataNode(datanodeinfo.ipcPort);
        FSDataset dataset = (FSDataset)datanode.data;
        // Pre-federation API: block ids are unique across the cluster, so
        // lookups take only the block id, with no block-pool qualifier.
        Block b = dataset.getStoredBlock(locatedblock.getBlock().getBlockId());
        File blockfile = dataset.findBlockFile(b.getBlockId());
        System.out.println("blockfile=" + blockfile);
        if (blockfile != null) {
          BufferedReader in = new BufferedReader(new FileReader(blockfile));
          assertEquals("something", in.readLine());
          in.close();