Package org.apache.hadoop.hdfs

Examples of org.apache.hadoop.hdfs.DFSClient$DFSOutputStream$Packet
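
The snippets below show org.apache.hadoop.hdfs.DFSClient in use. As a point of reference, here is a minimal sketch of the common lifecycle (construct the client against the namenode address, issue RPCs, close it), using only calls that also appear in the harvested examples; the path "/user/example/file.txt" is a hypothetical placeholder.

    Configuration conf = new Configuration();
    DFSClient client = new DFSClient(NameNode.getAddress(conf), conf);
    try {
      // Metadata query against the namenode.
      HdfsFileStatus status = client.getFileInfo("/user/example/file.txt");
      if (status != null) {
        // open() returns a DFSInputStream over the file's blocks.
        DFSInputStream in = client.open("/user/example/file.txt");
        in.close();
      }
    } finally {
      client.close(); // close the client and its namenode connection
    }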


    teardown();
  }
 
  private String decommissionOneNode() throws IOException {
   
    DFSClient client = ((DistributedFileSystem)fileSys).getClient();
    DatanodeInfo[] info = client.datanodeReport(DatanodeReportType.LIVE);

    // Pick a random live datanode to decommission.
    int index = 0;
    boolean found = false;
    while (!found) {
      index = rand.nextInt(info.length);


 
    @Override
    public void run() {
      boolean error = false;
      INodeFile node = null;
      DFSClient client = null;

      try {
        client = new DFSClient(conf);

        LOG.info("Trying to update lease for file at " + path);

        // Verify that the path still exists in the namespace and that the
        // file is still under construction; the null check guards the
        // isUnderConstruction() call.
        node = fsNamesys.dir.getFileINode(path);
        if (node == null || !node.isUnderConstruction()) {
          error = true;
        }
      }
      catch (IOException e) {
        LOG.error(StringUtils.stringifyException(e));
        error = true;
      }

      // Could not find inode in FSNamespace, quit now
      if (error) {
        LOG.error("Couldn't update length for leased file at " + path +
                  " because file not in namespace");
        return;
      }

      BlockInfo[] blks = node.getBlocks();

      // If NN has not leased out any block, return
      if (blks.length == 0) return;

      int index = blks.length - 1; // index of last file block

      LOG.info("Block at index " + index + " being written for file at  " +
               path);

      // Pessimistically update last block length from DataNode.
      // File could have been renamed, and a new file created in its place.
      try {
        DFSInputStream stm = client.open(path);
        try {
          DFSLocatedBlocks locBlks = stm.fetchLocatedBlocks();

          if (locBlks.locatedBlockCount() >= blks.length
              && blks[index] != null && locBlks.get(index) != null
              && blks[index].getBlockId() ==
                 locBlks.get(index).getBlock().getBlockId()) {
            blks[index].setNumBytes(
                locBlks.get(index).getBlock().getNumBytes());
          }
        } finally {
          // Always release the stream and the client, even after the
          // block length has been updated.
          stm.close();
          client.close(); // close dfs client
        }
      }
      catch (IOException e) {
        LOG.error(StringUtils.stringifyException(e));
      }

  /**
   * Get the saving of this code in bytes.
   * @return The saving in bytes
   */
  public long getSaving() {
    try (DFSClient dfs = new DFSClient(new Configuration())) {
      Counters raidedCounters = stateToSourceCounters.get(RaidState.RAIDED);
      long physical = raidedCounters.getNumBytes() +
          parityCounters.getNumBytes();
      long logical = raidedCounters.getNumLogical();
      // Bytes the logical data would occupy at the default replication,
      // minus the physical bytes actually stored (raided source + parity).
      return logical * dfs.getDefaultReplication() - physical;
    } catch (Exception e) {
      return -1;
    }
  }
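
For illustration with hypothetical numbers: 10 GB of logical raided data at a default replication of 3 would occupy 30 GB unraided; if the raided source plus parity actually occupy 18 GB, getSaving() reports 10 * 3 - 18 = 12 GB. The -1 return value signals that the counters or the client could not be obtained.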

  /**
   * Get the estimated saving of this code in bytes when RAIDing is done.
   * @return The saving in bytes
   */
  public long getDoneSaving() {
    try (DFSClient dfs = new DFSClient(new Configuration())) {
      Counters raidedCounters = stateToSourceCounters.get(RaidState.RAIDED);
      Counters shouldRaidCounters =
          stateToSourceCounters.get(RaidState.NOT_RAIDED_BUT_SHOULD);
      long physical = estimatedDoneSourceSize + estimatedDoneParitySize;
      long logical = raidedCounters.getNumLogical() +
          shouldRaidCounters.getNumLogical();
      return logical * dfs.getDefaultReplication() - physical;
    } catch (Exception e) {
      return -1;
    }
  }
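
getDoneSaving() applies the same formula, but the logical size additionally counts files that should be raided and are not yet, and the physical size uses the estimated post-RAID source and parity sizes. Continuing the hypothetical numbers above, 5 GB of additional should-raid logical data and an estimated 22 GB physical footprint would give (10 + 5) * 3 - 22 = 23 GB.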

            lastUpdate = now;
            synchronized (this) {
              // This obtains the datanodes from the HDFS cluster named in the
              // config file. If we need to support parity files in a
              // different cluster, this has to change.
              DFSClient client = new DFSClient(conf);
              liveNodes =
                  client.namenode.getDatanodeReport(DatanodeReportType.LIVE);
              for (DatanodeInfo n : liveNodes) {
                topology.add(n);
              }

  /** Get DFSClient for a namenode corresponding to the BPID from a datanode */
  public static DFSClient getDFSClient(final HttpServletRequest request,
    final Configuration conf) throws IOException, InterruptedException {
    final String nnAddr = request.getParameter(JspHelper.NAMENODE_ADDRESS);
    return new DFSClient(DFSUtil.getSocketAddress(nnAddr), conf);
  }
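
A hypothetical caller, sketched under the assumption that the helper is invoked from a servlet's doGet; the handler signature, the "path" request parameter, and the error handling are illustrative, not taken from the Hadoop source.

  protected void doGet(HttpServletRequest request, HttpServletResponse response)
      throws ServletException, IOException {
    final Configuration conf = new HdfsConfiguration();
    try {
      final DFSClient dfs = getDFSClient(request, conf); // helper above
      try {
        // Query metadata for a caller-supplied path (hypothetical parameter).
        HdfsFileStatus status = dfs.getFileInfo(request.getParameter("path"));
        // ... render the status into the response ...
      } finally {
        dfs.close();
      }
    } catch (InterruptedException e) {
      throw new ServletException(e);
    }
  }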

    }
  }
 
  private void lostFoundMove(FileStatus file, LocatedBlocks blocks)
      throws IOException {
    final DFSClient dfs = new DFSClient(NameNode.getAddress(conf), conf);
    try {
      if (!lfInited) {
        lostFoundInit(dfs);
      }
      if (!lfInitedOk) {
        return;
      }
      String target = lostFound + file.getPath();
      String errmsg = "Failed to move " + file.getPath() + " to /lost+found";
      try {
        PermissionStatus ps = new PermissionStatus(
            file.getOwner(), file.getGroup(), file.getPermission());
        if (!nn.namesystem.dir.mkdirs(target, ps, false, FSNamesystem.now())) {
          LOG.warn(errmsg);
          return;
        }
        // Copy the file's blocks into /lost+found as a series of "chains":
        // each chain is one target file holding consecutive recoverable
        // blocks; a new chain starts after a block with no live replicas.
        int chain = 0;
        OutputStream fos = null;
        for (LocatedBlock lBlk : blocks.getLocatedBlocks()) {
          LocatedBlock lblock = lBlk;
          DatanodeInfo[] locs = lblock.getLocations();
          if (locs == null || locs.length == 0) {
            // No replicas available: end the current chain.
            if (fos != null) {
              fos.flush();
              fos.close();
              fos = null;
            }
            continue;
          }
          if (fos == null) {
            fos = dfs.create(target + "/" + chain, true);
            if (fos != null) chain++;
            else {
              LOG.warn(errmsg + ": could not store chain " + chain);
              // perhaps we should bail out here...
              // return;
              continue;
            }
          }

          // copy the block. It's a pity it's not abstracted from DFSInputStream ...
          try {
            copyBlock(dfs, lblock, fos);
          } catch (Exception e) {
            e.printStackTrace();
            // something went wrong copying this block...
            LOG.warn(" - could not copy block " + lblock.getBlock() + " to " + target);
            fos.flush();
            fos.close();
            fos = null;
          }
        }
        if (fos != null) fos.close();
        LOG.warn("\n - moved corrupted file " + file.getPath() + " to /lost+found");
        dfs.delete(file.getPath().toString(), true);
      } catch (Exception e) {
        e.printStackTrace();
        LOG.warn(errmsg + ": " + e.getMessage());
      }
    } finally {
      dfs.close();
    }
  }

      create.write(testData.getBytes());
      create.hflush();
      long fileId =
          ((DFSOutputStream) create.getWrappedStream()).getFileId();
      FileStatus fileStatus = dfs.getFileStatus(filePath);
      DFSClient client = DFSClientAdapter.getClient(dfs);
      // add one dummy block at the NN, but do not write it to a DataNode
      ExtendedBlock previousBlock =
          DFSClientAdapter.getPreviousBlock(client, fileId);
      DFSClientAdapter.getNamenode(client).addBlock(
          pathString,
          client.getClientName(),
          new ExtendedBlock(previousBlock),
          new DatanodeInfo[0],
          DFSClientAdapter.getFileId((DFSOutputStream) create
              .getWrappedStream()), null);
      cluster.restartNameNode(0, true);

      final DataNode datanode = (DataNode) context.getAttribute("datanode");
      final Configuration conf =
        new HdfsConfiguration(datanode.getConf());
     
      try {
        final DFSClient dfs = DatanodeJspHelper.getDFSClient(request,
            datanode, conf, getUGI(request, conf));
        final MD5MD5CRC32FileChecksum checksum = dfs.getFileChecksum(path, Long.MAX_VALUE);
        MD5MD5CRC32FileChecksum.write(xml, checksum);
      } catch(IOException ioe) {
        writeXml(ioe, path, xml);
      } catch (InterruptedException e) {
        writeXml(e, path, xml);

  @Test
  public void testDotdotInodePath() throws Exception {
    final Configuration conf = new Configuration();
    MiniDFSCluster cluster = null;
    DFSClient client = null;
    try {
      cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
      cluster.waitActive();
      final DistributedFileSystem hdfs = cluster.getFileSystem();
      final FSDirectory fsdir = cluster.getNamesystem().getFSDirectory();

      final Path dir = new Path("/dir");
      hdfs.mkdirs(dir);
      long dirId = fsdir.getINode(dir.toString()).getId();
      long parentId = fsdir.getINode("/").getId();
      String testPath = "/.reserved/.inodes/" + dirId + "/..";

      client = new DFSClient(NameNode.getAddress(conf), conf);
      HdfsFileStatus status = client.getFileInfo(testPath);
      assertEquals(parentId, status.getFileId());

      // Test that root's parent is still root
      testPath = "/.reserved/.inodes/" + parentId + "/..";
      status = client.getFileInfo(testPath);
      assertEquals(parentId, status.getFileId());
     
    } finally {
      IOUtils.cleanup(LOG, client);
      if (cluster != null) {
