Examples of FSDataset
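The snippets below are excerpts from Apache Hadoop HDFS test code. Each one reaches the DataNode's internal storage layer by casting DataNode#data (or the result of DataNode#getFSDataset()) to FSDataset, which maps HDFS blocks to their backing files on local disk.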


Examples of org.apache.hadoop.hdfs.server.datanode.FSDataset
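This first excerpt appears to come from a MiniDFSCluster-based test. For each DataNode holding the file's single block, it casts the node's storage (DataNode#data) to FSDataset, resolves the block's on-disk file via getStoredBlock and findBlockFile, and checks the file's first line: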

      assertEquals(1, locations.locatedBlockCount());
      LocatedBlock locatedblock = locations.getLocatedBlocks().get(0);
      int successcount = 0;
      for(DatanodeInfo datanodeinfo: locatedblock.getLocations()) {
        DataNode datanode = cluster.getDataNode(datanodeinfo.ipcPort);
        FSDataset dataset = (FSDataset)datanode.data;
        Block b = dataset.getStoredBlock(locatedblock.getBlock().getBlockId());
        File blockfile = dataset.findBlockFile(b.getBlockId());
        System.out.println("blockfile=" + blockfile);
        if (blockfile != null) {
          BufferedReader in = new BufferedReader(new FileReader(blockfile));
          assertEquals("something", in.readLine());
          in.close();

Examples of org.apache.hadoop.hdfs.server.datanode.FSDataset
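Here the backing file of a block is fetched with FSDataset#getBlockFile and deleted outright; the test then reads the path back over HFTP, expecting the missing replica to surface as an exception: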

    Assert.assertEquals(blocksize, lb.getBlockSize());
    final DatanodeInfo[] datanodeinfos = lb.getLocations();
    Assert.assertEquals(DATANODE_NUM, datanodeinfos.length);
    final DataNode dn = cluster.getDataNode(datanodeinfos[0].getIpcPort());
    LOG.info("dn=" + dn + ", blk=" + blk + " (length=" + blk.getNumBytes() + ")");
    final FSDataset data = (FSDataset)dn.getFSDataset();
    final File blkfile = data.getBlockFile(blk);
    Assert.assertTrue(blkfile.delete());

    //read again by hftp, should get an exception
    LOG.info("hftpfs.getUri() = " + hftpfs.getUri());
    final ContentSummary cs = hftpfs.getContentSummary(filepath);

Examples of org.apache.hadoop.hdfs.server.datanode.FSDataset
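This excerpt, evidently from a copy-on-write test (it logs as testCopyOnWrite), hard-links every other block file and then detaches all blocks with unlinkBlock; a second round of unlinkBlock calls must return false because the blocks are already detached. The excerpt opens mid-way through an assertion that there is exactly one datanode: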

                  dn.length == 1);

      LocatedBlocks locations = client.getNamenode().getBlockLocations(
                                  file1.toString(), 0, Long.MAX_VALUE);
      List<LocatedBlock> blocks = locations.getLocatedBlocks();
      FSDataset dataset = (FSDataset) dn[0].data;

      //
      // Create hard links for a few of the blocks
      //
      for (int i = 0; i < blocks.size(); i = i + 2) {
        Block b = blocks.get(i).getBlock();
        File f = dataset.getFile(b);
        File link = new File(f.toString() + ".link");
        System.out.println("Creating hardlink for File " + f + " to " + link);
        HardLink.createHardLink(f, link);
      }

      //
      // Detach all blocks. This should remove hardlinks (if any)
      //
      for (int i = 0; i < blocks.size(); i++) {
        Block b = blocks.get(i).getBlock();
        System.out.println("testCopyOnWrite detaching block " + b);
        assertTrue("Detaching block " + b + " should have returned true",
            dataset.unlinkBlock(b, 1));
      }

      // Since the blocks were already detached earlier, these calls should
      // return false
      //
      for (int i = 0; i < blocks.size(); i++) {
        Block b = blocks.get(i).getBlock();
        System.out.println("testCopyOnWrite detaching block " + b);
        assertTrue("Detaching block " + b + " should have returned false",
            !dataset.unlinkBlock(b, 1));
      }

    } finally {
      fs.close();
      cluster.shutdown();

Examples of org.apache.hadoop.hdfs.server.datanode.FSDataset
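The next excerpt truncates a replica in place: it opens the block's backing file (FSDataset#getBlockFile) with a RandomAccessFile, asserts its length, and sets it to zero, presumably to simulate replica damage for an append test (the use of AppendTestUtil suggests as much):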

    assertEquals(len1, lb.getBlockSize());

    DatanodeInfo[] datanodeinfos = lb.getLocations();
    assertEquals(repl, datanodeinfos.length);
    final DataNode dn = cluster.getDataNode(datanodeinfos[0].getIpcPort());
    final FSDataset data = (FSDataset)dn.getFSDataset();
    final RandomAccessFile raf = new RandomAccessFile(data.getBlockFile(blk), "rw");
    AppendTestUtil.LOG.info("dn=" + dn + ", blk=" + blk + " (length=" + blk.getNumBytes() + ")");
    assertEquals(len1, raf.length());
    raf.setLength(0);
    raf.close();

Examples of org.apache.hadoop.hdfs.server.datanode.FSDataset
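A complete helper method this time: it pairs each DataNode's block report with that node's FSDataset and collects the on-disk File of every reported block: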

  static List<File> getBlockFiles(MiniDFSCluster cluster) throws IOException {
    List<File> files = new ArrayList<File>();
    List<DataNode> datanodes = cluster.getDataNodes();
    Iterable<Block>[] blocks = cluster.getAllBlockReports();
    for(int i = 0; i < blocks.length; i++) {
      FSDataset ds = (FSDataset)datanodes.get(i).getFSDataset();
      for(Block b : blocks[i]) {
        files.add(ds.getBlockFile(b));
      }       
    }
    return files;
  }

Examples of org.apache.hadoop.hdfs.server.datanode.FSDataset
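This excerpt exercises FSDataset#updateBlock in a namespace-aware (federated) build. It bumps a block's generation stamp, verifies that a second update based on the now-stale block is rejected with an IOException, and then updates from the current block: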

      assertEquals(1, locations.locatedBlockCount());
      LocatedBlock locatedblock = locations.getLocatedBlocks().get(0);
      int nsId = cluster.getNameNode().getNamespaceID();
      for(DatanodeInfo datanodeinfo: locatedblock.getLocations()) {
        DataNode datanode = cluster.getDataNode(datanodeinfo.ipcPort);
        FSDataset dataset = (FSDataset)datanode.data;
        Block b = dataset.getStoredBlock(nsId, locatedblock.getBlock().getBlockId());
        Block newBlock = new Block(b);
        newBlock.setGenerationStamp(6661);
        dataset.updateBlock(nsId, b, newBlock);

        Block newBlock1 = new Block(b);
        newBlock1.setGenerationStamp(6662);
        boolean hitException = false;
        try {
          dataset.updateBlock(nsId, b, newBlock1);
        } catch (IOException e) {
          hitException = true;
        }
        TestCase.assertTrue(
            "Shouldn't allow update block when generation doesn't match",
            hitException);
        dataset.updateBlock(nsId, newBlock, newBlock1);
      }
    } finally {
      IOUtils.closeStream(dfs);
      cluster.shutdown();
    }

Examples of org.apache.hadoop.hdfs.server.datanode.FSDataset
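A namespace-aware variant of the copy-on-write excerpt above: the block file is resolved through getFile(nsId, b) before the hard link is created. Again the excerpt opens mid-way through the single-datanode assertion, and nsId is presumably obtained from cluster.getNameNode().getNamespaceID() earlier in the full test: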

                  dn.length == 1);

      LocatedBlocks locations = client.namenode.getBlockLocations(
                                  file1.toString(), 0, Long.MAX_VALUE);
      List<LocatedBlock> blocks = locations.getLocatedBlocks();
      FSDataset dataset = (FSDataset) dn[0].data;

      //
      // Create hard links for a few of the blocks
      //
      for (int i = 0; i < blocks.size(); i = i + 2) {
        Block b = blocks.get(i).getBlock();
        File f = dataset.getFile(nsId, b);
        File link = new File(f.toString() + ".link");
        System.out.println("Creating hardlink for File " + f +
                           " to " + link);
        HardLink.createHardLink(f, link);
      }

Examples of org.apache.hadoop.hdfs.server.datanode.FSDataset
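The block-truncation pattern again, in its namespace-aware form, where getBlockFile takes the namespace id as well as the block: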

    assertEquals(len1, lb.getBlockSize());

    DatanodeInfo[] datanodeinfos = lb.getLocations();
    assertEquals(repl, datanodeinfos.length);
    final DataNode dn = cluster.getDataNode(datanodeinfos[0].getIpcPort());
    final FSDataset data = (FSDataset)dn.getFSDataset();
    int nsId = cluster.getNameNode().getNamespaceID();
    final RandomAccessFile raf = new RandomAccessFile(data.getBlockFile(nsId, blk), "rw");
    AppendTestUtil.LOG.info("dn=" + dn + ", blk=" + blk + " (length=" + blk.getNumBytes() + ")");
    assertEquals(len1, raf.length());
    raf.setLength(0);
    raf.close();

Examples of org.apache.hadoop.hdfs.server.datanode.FSDataset
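Likewise a namespace-aware version of the first excerpt: getStoredBlock and findBlockFile both take the namespace id when locating the block file whose contents are verified: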

      LocatedBlock locatedblock = locations.getLocatedBlocks().get(0);
      int successcount = 0;
      int nsId = cluster.getNameNode().getNamespaceID();
      for(DatanodeInfo datanodeinfo: locatedblock.getLocations()) {
        DataNode datanode = cluster.getDataNode(datanodeinfo.ipcPort);
        FSDataset dataset = (FSDataset)datanode.data;
        Block b = dataset.getStoredBlock(nsId, locatedblock.getBlock().getBlockId());
        File blockfile = dataset.findBlockFile(nsId, b.getBlockId());
        System.out.println("blockfile=" + blockfile);
        if (blockfile != null) {
          BufferedReader in = new BufferedReader(new FileReader(blockfile));
          assertEquals("something", in.readLine());
          in.close();

Examples of org.apache.hadoop.hdfs.server.datanode.FSDataset
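The final excerpt, from a test named testParallelCheckDirs, replaces a DataNode's FSDataset with what appears to be a Mockito spy and uses reflection to reach the non-public DataNode#checkDiskError(Exception) method: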

    super.tearDown();
  }

  public void testParallelCheckDirs() throws Exception {
    final DataNode datanode = cluster.getDataNodes().get(0);
    FSDataset fsDataset = (FSDataset) datanode.data;
    datanode.data = spy(fsDataset);

    final Method checkDiskMethod = DataNode.class.getDeclaredMethod(
        "checkDiskError", Exception.class);
    checkDiskMethod.setAccessible(true);
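The excerpts above share one pattern: find a replica's DataNode, cast its storage to FSDataset, and map a block to its file on local disk. Below is a minimal sketch of that pattern, assuming the pre-federation API where getBlockFile takes only a Block (federated builds add a namespace id, as in the later excerpts); the class and method names here are hypothetical.

  import java.io.File;

  import org.apache.hadoop.hdfs.MiniDFSCluster;
  import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
  import org.apache.hadoop.hdfs.protocol.LocatedBlock;
  import org.apache.hadoop.hdfs.server.datanode.DataNode;
  import org.apache.hadoop.hdfs.server.datanode.FSDataset;

  // Hypothetical helper: resolve the first replica's DataNode through the
  // MiniDFSCluster, cast its storage layer to FSDataset, and return the
  // block's backing file on local disk.
  class FSDatasetExampleSketch {
    static File blockFileFor(MiniDFSCluster cluster, LocatedBlock lb) {
      DatanodeInfo info = lb.getLocations()[0];
      DataNode dn = cluster.getDataNode(info.getIpcPort());
      FSDataset data = (FSDataset) dn.getFSDataset();
      return data.getBlockFile(lb.getBlock());
    }
  }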