Examples of fetchLocatedBlocks()


Examples of org.apache.hadoop.hdfs.DFSInputStream.fetchLocatedBlocks()

      // Pessimistically update last block length from DataNode.
      // File could have been renamed, and a new file created in its place.
      try {
        DFSInputStream stm = client.open(path);
        DFSLocatedBlocks locBlks = stm.fetchLocatedBlocks();

        if (locBlks.locatedBlockCount() >= blks.length) {
          if (blks[index] != null && locBlks.get(index) != null) {
            if (blks[index].getBlockId() == locBlks.get(index).getBlock().getBlockId()) {
              blks[index].setNumBytes(locBlks.get(index).getBlock().getNumBytes());
View Full Code Here

Examples of org.apache.hadoop.hdfs.DFSInputStream.fetchLocatedBlocks()

    blocksArr = ssProtocol.getLocatedBlocks("test", "/bar/woot");
    assertTrue(blocksArr.length == 1); // 1 file

    stm = client.open("/bar/woot");
    locBlks = blocksArr[0];
    dfsLocBlks = stm.fetchLocatedBlocks();
    stm.close();

    assertTrue(locBlks.locatedBlockCount() == 1); // one byte so must be one block
    assertTrue(locBlks.locatedBlockCount() == dfsLocBlks.locatedBlockCount());
    assertTrue(locBlks.get(0).getBlock().getBlockId() ==
View Full Code Here

Examples of org.apache.hadoop.hdfs.DFSInputStream.fetchLocatedBlocks()

      // Pessimistically update last block length from DataNode.
      // File could have been renamed, and a new file created in its place.
      try {
        DFSInputStream stm = client.open(path);
        DFSLocatedBlocks locBlks = stm.fetchLocatedBlocks();

        if (locBlks.locatedBlockCount() >= blks.length) {
          if (blks[index] != null && locBlks.get(index) != null) {
            if (blks[index].getBlockId() == locBlks.get(index).getBlock().getBlockId()) {
              blks[index].setNumBytes(locBlks.get(index).getBlock().getNumBytes());
View Full Code Here

Examples of org.apache.hadoop.hdfs.DFSInputStream.fetchLocatedBlocks()

        if (!child.isDir()) { // get block ids for file
          Path path = child.getPath(); // paths will be unique
          fileMap.put(path, new ArrayList<Long>());

          DFSInputStream stm = client.open(child.getPath().toUri().getPath());
          LocatedBlocks blocks = stm.fetchLocatedBlocks();
          stm.close();

          for (int i = 0; i < blocks.locatedBlockCount(); i++) {
            Long blockId = blocks.get(i).getBlock().getBlockId();
            fileMap.get(path).add(blockId); // add to file block list
View Full Code Here

Examples of org.apache.hadoop.hdfs.DFSInputStream.fetchLocatedBlocks()

    assertTrue(blocksArr.length == 1); // 1 file

    DFSClient client = new DFSClient(conf);
    DFSInputStream stm = client.open("/bar/foo");
    LocatedBlocks locBlks = blocksArr[0];
    DFSLocatedBlocks dfsLocBlks = stm.fetchLocatedBlocks();
    stm.close();

    assertTrue(locBlks.locatedBlockCount() == 1); // one byte so must be one block
    assertTrue(locBlks.locatedBlockCount() == dfsLocBlks.locatedBlockCount());
    assertTrue(locBlks.get(0).getBlock().getBlockId() ==
View Full Code Here
TOP
Copyright © 2018 www.massapi.com. All rights reserved.
All source code is the property of its respective owners. Java is a trademark of Sun Microsystems, Inc., which is owned by Oracle Inc. Contact coftware#gmail.com.