Package org.apache.hadoop.hdfs.protocol

Examples of org.apache.hadoop.hdfs.protocol.LocatedBlocks
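LocatedBlocks is what the namenode returns from ClientProtocol#getBlockLocations: an ordered list of LocatedBlock entries (each pairing a block with the datanodes that hold it), plus the file length and an under-construction flag. As a minimal sketch of the read pattern used throughout the examples below — assuming an initialized DFSClient named client, with a hypothetical path:

      // Minimal sketch: fetch all block locations for a file and walk them.
      // `client` is an initialized DFSClient; the path is hypothetical.
      LocatedBlocks locations = client.getNamenode().getBlockLocations(
          "/path/to/file", 0, Long.MAX_VALUE);
      System.out.println("file length  = " + locations.getFileLength());
      System.out.println("block count  = " + locations.locatedBlockCount());
      System.out.println("under constr = " + locations.isUnderConstruction());
      for (LocatedBlock lb : locations.getLocatedBlocks()) {
        System.out.println(lb.getBlock() + " at offset " + lb.getStartOffset());
        for (DatanodeInfo dn : lb.getLocations()) {
          System.out.println("  replica on " + dn);  // datanode holding a copy
        }
      }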


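Closing the remaining write streams, then asking the namenode through DFSClient for each file's LocatedBlocks and asserting on locatedBlockCount():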
      stm3.close();
      stm4.close();

      // verify that new block is associated with this file
      DFSClient client = fs.dfs;
      LocatedBlocks locations = client.getNamenode().getBlockLocations(
                                  file1.toString(), 0, Long.MAX_VALUE);
      System.out.println("locations = " + locations.locatedBlockCount());
      assertTrue("Error blocks were not cleaned up for file " + file1,
                 locations.locatedBlockCount() == 3);

      // verify filestatus2.dat
      locations = client.getNamenode().getBlockLocations(
                                  file2.toString(), 0, Long.MAX_VALUE);
      System.out.println("locations = " + locations.locatedBlockCount());
      assertTrue("Error blocks were not cleaned up for file " + file2,
                 locations.locatedBlockCount() == 1);
    } finally {
      IOUtils.closeStream(fs);
      cluster.shutdown();
    }
  }


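Forcing lease recovery by shrinking the namenode's lease period, then checking that the recovered file has exactly one block and looking up the stored replica on each datanode that holds it: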
      // namenode triggers lease recovery
      cluster.setLeasePeriod(leasePeriod, leasePeriod);
      // wait for the lease to expire
      try {Thread.sleep(5 * leasePeriod);} catch (InterruptedException e) {}

      LocatedBlocks locations = dfs.dfs.getNamenode().getBlockLocations(
          f, 0, Long.MAX_VALUE);
      assertEquals(1, locations.locatedBlockCount());
      LocatedBlock locatedblock = locations.getLocatedBlocks().get(0);
      int successcount = 0;
      for(DatanodeInfo datanodeinfo: locatedblock.getLocations()) {
        DataNode datanode = cluster.getDataNode(datanodeinfo.getIpcPort());
        ExtendedBlock blk = locatedblock.getBlock();
        Block b = DataNodeTestUtils.getFSDataset(datanode).getStoredBlock(
            blk.getBlockPoolId(), blk.getBlockId());

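A namenode-side implementation of ClientProtocol#getBlockLocations that sorts each block's datanodes by proximity to the client; the last block is sorted separately because getLocatedBlocks() does not include it: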
   * @see ClientProtocol#getBlockLocations(String, long, long)
   */
  LocatedBlocks getBlockLocations(String clientMachine, String src,
      long offset, long length) throws AccessControlException,
      FileNotFoundException, UnresolvedLinkException, IOException {
    LocatedBlocks blocks = getBlockLocations(src, offset, length, true, true,
        true);
    if (blocks != null) {
      blockManager.getDatanodeManager().sortLocatedBlocks(
          clientMachine, blocks.getLocatedBlocks());
     
      // lastBlock is not part of getLocatedBlocks(), might need to sort it too
      LocatedBlock lastBlock = blocks.getLastLocatedBlock();
      if (lastBlock != null) {
        ArrayList<LocatedBlock> lastBlockList =
            Lists.newArrayListWithCapacity(1);
        lastBlockList.add(lastBlock);
        blockManager.getDatanodeManager().sortLocatedBlocks(
            clientMachine, lastBlockList);
      }
    }
    return blocks;
  }

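Validating the request before returning locations: negative lengths are rejected, the open is audit-logged, and in safe mode a file whose blocks have no locations yet triggers a SafeModeException: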
    }
    if (length < 0) {
      throw new HadoopIllegalArgumentException(
          "Negative length is not supported. File: " + src);
    }
    final LocatedBlocks ret = getBlockLocationsUpdateTimes(src,
        offset, length, doAccessTime, needBlockToken);
    logAuditEvent(true, "open", src);
    if (checkSafeMode && isInSafeMode()) {
      for (LocatedBlock b : ret.getLocatedBlocks()) {
        // if safemode & no block locations yet then throw safemodeException
        if ((b.getLocations() == null) || (b.getLocations().length == 0)) {
          SafeModeException se = new SafeModeException(
              "Zero blocklocations for " + src, safeMode);
          if (haEnabled && haContext != null &&
              haContext.getState().getServiceState() == HAServiceState.ACTIVE) {
            // on the active namenode, let the client retry
            throw new RetriableException(se);
          } else {
            throw se;
          }
        }
      }
    }
    return ret;

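Assembling LocatedBlocks on the namenode: for a snapshot path the length is clamped to the snapshot file's size, blockManager.createLocatedBlocks() builds the result, and cache locations are attached to every block: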
          // if src indicates a snapshot file, we need to make sure the returned
          // blocks do not exceed the size of the snapshot file.
          length = Math.min(length, fileSize - offset);
          isUc = false;
        }
        LocatedBlocks blocks =
          blockManager.createLocatedBlocks(inode.getBlocks(), fileSize,
            isUc, offset, length, needBlockToken, iip.isSnapshot());
        // Set caching information for the located blocks.
        for (LocatedBlock lb: blocks.getLocatedBlocks()) {
          cacheManager.setCachedLocations(lb);
        }
        return blocks;
      } finally {
        if (isReadOp) {
          readUnlock();
        } else {
          writeUnlock();
        }
      }

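A corruption test: after creating a file and fetching its block locations, the first block is corrupted and reading the file is expected to raise a BlockMissingException: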
      fileSys = dfs.getFileSystem();
      Path file1 = new Path("/user/dhruba/raidtest/file1");
      createOldFile(fileSys, file1, 1, numBlocks, blockSize);

      // extract block locations from the file system; wait till the file is closed
      LocatedBlocks locations = null;
      locations = fileSys.dfs.getNamenode().getBlockLocations(file1.toString(),
          0, numBlocks * blockSize);
      // remove block of file
      LOG.info("Remove first block of file");
      corruptBlock(file1, locations.get(0).getBlock());

      // validate that the system throws BlockMissingException
      validateFile(fileSys, file1);
    } finally {
      if (fileSys != null) fileSys.close();

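Aborting a DFSOutputStream after hsync(), then using the first block's BlockLocalPathInfo to verify that the block's metadata file still exists on the datanode's local disk: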
    }
    out.hsync();
    // abort the original stream
    ((DFSOutputStream) out.getWrappedStream()).abort();

    LocatedBlocks locations = cluster.getNameNodeRpc().getBlockLocations(
        file.toString(), 0, count);
    ExtendedBlock block = locations.get(0).getBlock();
    DataNode dn = cluster.getDataNodes().get(0);
    BlockLocalPathInfo localPathInfo = dn.getBlockLocalPathInfo(block, null);
    File metafile = new File(localPathInfo.getMetaPath());
    assertTrue(metafile.exists());

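A client-side openInfo(): fetch the file's LocatedBlocks, verify the block list has not changed since the last call, and, for a file under construction, ask a datanode for the last block's current length: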
  synchronized void openInfo() throws IOException {
    if (src == null && blocks == null) {
      throw new IOException("No fine provided to open");
    }

    LocatedBlocks newInfo = src != null ?
                            getLocatedBlocks(src, 0, prefetchSize) : blocks;
    if (newInfo == null) {
      throw new IOException("Cannot open filename " + src);
    }

    // I think this check is not correct. A file could have been appended to
    // between two calls to openInfo().
    if (locatedBlocks != null && !locatedBlocks.isUnderConstruction() &&
        !newInfo.isUnderConstruction()) {
      Iterator<LocatedBlock> oldIter = locatedBlocks.getLocatedBlocks().iterator();
      Iterator<LocatedBlock> newIter = newInfo.getLocatedBlocks().iterator();
      while (oldIter.hasNext() && newIter.hasNext()) {
        if (! oldIter.next().getBlock().equals(newIter.next().getBlock())) {
          throw new IOException("Blocklist for " + src + " has changed!");
        }
      }
    }

    // if the file is under construction, then fetch size of last block
    // from datanode.
    if (newInfo.isUnderConstruction() && newInfo.locatedBlockCount() > 0) {
      LocatedBlock last = newInfo.get(newInfo.locatedBlockCount()-1);
      if (last.getLocations().length > 0) {
        try {
          Block newBlock = getBlockInfo(last);
          // only if the block has data (not null)
          if (newBlock != null) {
            long newBlockSize = newBlock.getNumBytes();
            newInfo.setLastBlockSize(newBlock.getBlockId(), newBlockSize);
          }
        } catch (IOException e) {
          DFSClient.LOG.debug("DFSClient file " + src +
                    " is being concurrently appended to" +
                    " but datanodes probably do not have block " +
                    last.getBlock());
        }

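Fetching LocatedBlocks on the client, updating the data-transfer protocol version when the response carries one, and unwrapping RemoteExceptions into their underlying types: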
          dfsClient.updateDataTransferProtocolVersionIfNeeded(locs.getDataProtocolVersion());
        }
        checkLocatedBlocks(locs);
        return locs;
      } else {
        LocatedBlocks locs = dfsClient.namenode.getBlockLocations(src, start, length);
        checkLocatedBlocks(locs);
        return locs;
      }
    } catch(RemoteException re) {
      throw re.unwrapRemoteException(AccessControlException.class,
                                     FileNotFoundException.class);
    }

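Finding the block that contains a given file offset: the cached block list is searched first; on a miss, more blocks are prefetched from the namenode and merged into the cached range: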
    assert (locatedBlocks != null) : "locatedBlocks is null";
    // search cached blocks first
    LocatedBlock blk = locatedBlocks.getBlockContainingOffset(offset);
    if (blk == null) { // block is not cached
      // fetch more blocks
      LocatedBlocks newBlocks;
      newBlocks = getLocatedBlocks(src, offset, prefetchSize);
      if (newBlocks == null) {
        if (!throwWhenNotFound) {
          return null;
        }
        throw new IOException("Could not find target position " + offset);
      }
      locatedBlocks.insertRange(newBlocks.getLocatedBlocks());
      locatedBlocks.setFileLength(newBlocks.getFileLength());
    }
    blk = locatedBlocks.getBlockContainingOffset(offset);
    if (blk == null) {
      if (!throwWhenNotFound) {
        return null;
      }
      throw new IOException("Could not find target position " + offset);
    }
    return blk;
