Examples of HdfsDataInputStream
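
HdfsDataInputStream is the HDFS-specific subclass of FSDataInputStream that DistributedFileSystem.open() returns. Besides the ordinary read and seek API it exposes getVisibleLength(), getCurrentBlock(), getCurrentDatanode() and getAllBlocks(), which the excerpts on this page rely on. A minimal, self-contained sketch (not taken from any excerpt below; the path is a placeholder and fs.defaultFS is assumed to point at an HDFS cluster):

    import java.io.IOException;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hdfs.DistributedFileSystem;
    import org.apache.hadoop.hdfs.client.HdfsDataInputStream;
    import org.apache.hadoop.hdfs.protocol.LocatedBlock;

    public class HdfsDataInputStreamSketch {
      public static void main(String[] args) throws IOException {
        Configuration conf = new Configuration();
        FileSystem fs = FileSystem.get(conf);          // assumes fs.defaultFS is an hdfs:// URI
        Path path = new Path("/tmp/example.txt");      // placeholder path

        try (HdfsDataInputStream in =
                 (HdfsDataInputStream) ((DistributedFileSystem) fs).open(path)) {
          System.out.println("visible length = " + in.getVisibleLength());
          for (LocatedBlock blk : in.getAllBlocks()) { // every block with its replica locations
            System.out.println(blk.getBlock() + " @ "
                + java.util.Arrays.toString(blk.getLocations()));
          }
        }
      }
    }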


Examples of org.apache.hadoop.hdfs.client.HdfsDataInputStream

    // hflush file
    AppendTestUtil.LOG.info("hflush");
    stm.hflush();
   
    // check visible length
    final HdfsDataInputStream in = (HdfsDataInputStream)dfs.open(filePath);
    Assert.assertEquals(size, in.getVisibleLength());
    in.close();
   
    if (doRename) {
      fileStr += ".renamed";
      Path renamedPath = new Path(fileStr);
      assertTrue(dfs.rename(filePath, renamedPath));
View Full Code Here
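
This excerpt is from an append/hflush test: after stm.hflush(), a freshly opened reader must already observe size bytes through getVisibleLength(), even though the file is still open for write. A hedged sketch of the write side the excerpt assumes (variable names mirror the excerpt; the payload is arbitrary):

    // Write `size` bytes and hflush() them; the stream is deliberately left open
    // so that getVisibleLength() is exercised against an in-progress file.
    FSDataOutputStream stm = dfs.create(filePath);
    byte[] payload = new byte[size];
    new java.util.Random().nextBytes(payload);
    stm.write(payload);
    stm.hflush();   // makes the written bytes visible to new readers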

Examples of org.apache.hadoop.hdfs.client.HdfsDataInputStream

    switch(op.getValue()) {
    case OPEN:
    {
      final int b = bufferSize.getValue(conf);
      final DFSClient dfsclient = new DFSClient(nnRpcAddr, conf);
      HdfsDataInputStream in = null;
      try {
        in = new HdfsDataInputStream(dfsclient.open(fullpath, b, true));
        in.seek(offset.getValue());
      } catch(IOException ioe) {
        IOUtils.cleanup(LOG, in);
        IOUtils.cleanup(LOG, dfsclient);
        throw ioe;
      }
      final HdfsDataInputStream dis = in;
      final StreamingOutput streaming = new StreamingOutput() {
        @Override
        public void write(final OutputStream out) throws IOException {
          final Long n = length.getValue();
          HdfsDataInputStream dfsin = dis;
          DFSClient client = dfsclient;
          try {
            if (n == null) {
              IOUtils.copyBytes(dfsin, out, b);
            } else {
              IOUtils.copyBytes(dfsin, out, n, false);
            }
            dfsin.close();
            dfsin = null;
            dfsclient.close();
            client = null;
          } finally {
            IOUtils.cleanup(LOG, dfsin);
View Full Code Here
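
This excerpt appears to be the datanode-side WebHDFS handler for the OPEN operation: it creates a DFSClient against the namenode RPC address, wraps the resulting DFSInputStream in an HdfsDataInputStream, seeks to the requested offset, and streams the file contents into the HTTP response with IOUtils.copyBytes (the whole remainder when no length parameter was supplied, otherwise exactly n bytes). If the open or seek fails, both the stream and the client are released with IOUtils.cleanup before the exception is rethrown; the finally block visible at the end of the excerpt performs the same cleanup for the streaming path.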

Examples of org.apache.hadoop.hdfs.client.HdfsDataInputStream

    fs.delete(root, true);
    files = null;
  }
 
  public static ExtendedBlock getFirstBlock(FileSystem fs, Path path) throws IOException {
    HdfsDataInputStream in = (HdfsDataInputStream)((DistributedFileSystem)fs).open(path);
    in.readByte();
    return in.getCurrentBlock();
 
View Full Code Here
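
The helper above fetches the first block of a file by reading a single byte (which forces the client to locate the block the stream is positioned in) and then asking the stream for getCurrentBlock(). The excerpt is cut off before the method ends; a hedged, complete sketch that also closes the stream might look like this (the close is an addition, not part of the shown excerpt):

    public static ExtendedBlock getFirstBlock(FileSystem fs, Path path) throws IOException {
      HdfsDataInputStream in =
          (HdfsDataInputStream) ((DistributedFileSystem) fs).open(path);
      try {
        in.readByte();                 // force the first block to be located
        return in.getCurrentBlock();   // the block the stream is currently positioned in
      } finally {
        in.close();
      }
    }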

Examples of org.apache.hadoop.hdfs.client.HdfsDataInputStream

    String downnode, int numDatanodes) throws IOException {
    boolean isNodeDown = (downnode != null);
    // need a raw stream
    assertTrue("Not HDFS:"+fileSys.getUri(),
        fileSys instanceof DistributedFileSystem);
    HdfsDataInputStream dis = (HdfsDataInputStream)
        ((DistributedFileSystem)fileSys).open(name);
    Collection<LocatedBlock> dinfo = dis.getAllBlocks();
    for (LocatedBlock blk : dinfo) { // for each block
      int hasdown = 0;
      DatanodeInfo[] nodes = blk.getLocations();
      for (int j = 0; j < nodes.length; j++) { // for each replica
        if (isNodeDown && nodes[j].getXferAddr().equals(downnode)) {
View Full Code Here
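
Here the test opens the file as a raw HdfsDataInputStream so it can call getAllBlocks() and inspect every replica location: for each LocatedBlock it walks the DatanodeInfo array and compares each replica's transfer address with the node that was taken down. A hedged sketch of the same idea as a small helper (downnode is a host:port transfer address, as in the excerpt):

    // Count how many block replicas of an open file live on a given datanode.
    static int countReplicasOn(HdfsDataInputStream dis, String downnode) throws IOException {
      int count = 0;
      for (LocatedBlock blk : dis.getAllBlocks()) {
        for (DatanodeInfo node : blk.getLocations()) {
          if (node.getXferAddr().equals(downnode)) {
            count++;
          }
        }
      }
      return count;
    }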

Examples of org.apache.hadoop.hdfs.client.HdfsDataInputStream

          "Input streams must be types of HdfsDataInputStream");
   
    LocatedBlock lblocks[] = new LocatedBlock[2];

    // Find block in data stream.
    HdfsDataInputStream dfsIn = (HdfsDataInputStream) in;
    ExtendedBlock dataBlock = dfsIn.getCurrentBlock();
    if (dataBlock == null) {
      LOG.error("Error: Current block in data stream is null! ");
      return false;
    }
    DatanodeInfo[] dataNode = {dfsIn.getCurrentDatanode()};
    lblocks[0] = new LocatedBlock(dataBlock, dataNode);
    LOG.info("Found checksum error in data stream at block="
        + dataBlock + " on datanode="
        + dataNode[0]);

    // Find block in checksum stream
    HdfsDataInputStream dfsSums = (HdfsDataInputStream) sums;
    ExtendedBlock sumsBlock = dfsSums.getCurrentBlock();
    if (sumsBlock == null) {
      LOG.error("Error: Current block in checksum stream is null! ");
      return false;
    }
    DatanodeInfo[] sumsNode = {dfsSums.getCurrentDatanode()};
    lblocks[1] = new LocatedBlock(sumsBlock, sumsNode);
    LOG.info("Found checksum error in checksum stream at block="
        + sumsBlock + " on datanode=" + sumsNode[0]);

    // Ask client to delete blocks.
View Full Code Here
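
This excerpt appears to come from the checksum-failure reporting path in DistributedFileSystem: both the data stream and the checksum stream must be HdfsDataInputStream instances so that getCurrentBlock() and getCurrentDatanode() can pinpoint exactly which replica of which block each stream was reading when the mismatch was detected. The two offending replicas are packaged as LocatedBlock entries in lblocks, and the code following the excerpt (per its final comment) hands them to the client so the bad replicas can be reported and removed.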

Examples of org.apache.hadoop.hdfs.client.HdfsDataInputStream

    DistributedFileSystem fs = getFileSystem(readingUser, uri, conf);
    if (legacyShortCircuitFails) {
      assertTrue(fs.getClient().useLegacyBlockReaderLocal());
    }
   
    HdfsDataInputStream stm = (HdfsDataInputStream)fs.open(name);

    ByteBuffer actual = ByteBuffer.allocateDirect(expected.length - readOffset);

    IOUtils.skipFully(stm, readOffset);

    actual.limit(3);

    //Read a small number of bytes first.
    int nread = stm.read(actual);
    actual.limit(nread + 2);
    nread += stm.read(actual);

    // Read across chunk boundary
    actual.limit(Math.min(actual.capacity(), nread + 517));
    nread += stm.read(actual);
    checkData(arrayFromByteBuffer(actual), readOffset, expected, nread,
        "A few bytes");
    //Now read rest of it
    actual.limit(actual.capacity());
    while (actual.hasRemaining()) {
      int nbytes = stm.read(actual);

      if (nbytes < 0) {
        throw new EOFException("End of file reached before reading fully.");
      }
      nread += nbytes;
    }
    checkData(arrayFromByteBuffer(actual), readOffset, expected, "Read 3");
    if (legacyShortCircuitFails) {
      assertFalse(fs.getClient().useLegacyBlockReaderLocal());
    }
    stm.close();
  }
View Full Code Here
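
This test reads through an HdfsDataInputStream into a direct ByteBuffer in several deliberately awkward steps (a few bytes, a read across a chunk boundary, then the rest), accumulating nread and finally comparing against the expected data. The trailing loop is the general pattern for filling a buffer from the ByteBuffer read path; a hedged helper version:

    // Fill the remaining space in `buf` from the stream, or fail at EOF.
    static int readFully(HdfsDataInputStream in, ByteBuffer buf) throws IOException {
      int total = 0;
      while (buf.hasRemaining()) {
        int n = in.read(buf);          // ByteBuffer read; may throw UnsupportedOperationException
        if (n < 0) {
          throw new EOFException("End of file reached before reading fully.");
        }
        total += n;
      }
      return total;
    }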

Examples of org.apache.hadoop.hdfs.client.HdfsDataInputStream

    }
  }

  private boolean checkUnsupportedMethod(FileSystem fs, Path file,
                                           byte[] expected, int readOffset) throws IOException {
    HdfsDataInputStream stm = (HdfsDataInputStream)fs.open(file);
    ByteBuffer actual = ByteBuffer.allocateDirect(expected.length - readOffset);
    IOUtils.skipFully(stm, readOffset);
    try {
      stm.read(actual);
    } catch(UnsupportedOperationException unex) {
      return true;
    }
    return false;
  }
View Full Code Here
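
The helper above probes the byte-buffer read path of the opened stream: FSDataInputStream.read(ByteBuffer) throws UnsupportedOperationException when the underlying reader does not support ByteBuffer reads, and checkUnsupportedMethod returns true exactly in that case, false if the read succeeds.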


Examples of org.apache.hadoop.hdfs.client.HdfsDataInputStream

    switch(op.getValue()) {
    case OPEN:
    {
      final int b = bufferSize.getValue(conf);
      final DFSClient dfsclient = new DFSClient(nnRpcAddr, conf);
      HdfsDataInputStream in = null;
      try {
        in = new HdfsDataInputStream(dfsclient.open(fullpath, b, true));
        in.seek(offset.getValue());
      } catch(IOException ioe) {
        IOUtils.cleanup(LOG, in);
        IOUtils.cleanup(LOG, dfsclient);
        throw ioe;
      }
     
      final long n = length.getValue() != null ?
        Math.min(length.getValue(), in.getVisibleLength() - offset.getValue()) :
        in.getVisibleLength() - offset.getValue();

      /**
       * Allow the Web UI to perform an AJAX request to get the data.
       */
      return Response.ok(new OpenEntity(in, n, dfsclient))
View Full Code Here
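
This longer variant of the same OPEN handler computes the number of bytes to stream up front: the requested length capped at getVisibleLength() minus the starting offset, or everything from the offset to the visible end of the file when no length parameter was given. The stream, the byte count and the DFSClient are then passed to an OpenEntity for the response body.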

Examples of org.apache.hadoop.hdfs.client.HdfsDataInputStream

    fs.delete(root, true);
    files = null;
  }
 
  public static ExtendedBlock getFirstBlock(FileSystem fs, Path path) throws IOException {
    HdfsDataInputStream in = (HdfsDataInputStream) fs.open(path);
    in.readByte();
    return in.getCurrentBlock();
 
View Full Code Here