Package org.apache.hadoop.hdfs

Examples of org.apache.hadoop.hdfs.ClientContext
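The two snippets below appear to be test helpers exercising HDFS short-circuit local reads; each one looks up the client's shared ClientContext in order to assert on the legacy short-circuit reader state. Both reach the context through the static factory ClientContext.getFromConf(Configuration), as shown in a minimal sketch here (contexts are cached, keyed by the dfs.client.context name, so equivalent configurations share one instance; the class name ClientContextDemo is only for illustration):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.ClientContext;

public class ClientContextDemo {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    // Returns the shared, cached ClientContext for this configuration.
    ClientContext context = ClientContext.getFromConf(conf);
    System.out.println(context);
  }
}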


  // The opening line of this helper was cut off; the signature below is
  // reconstructed from the parameters the body uses (uri, name, expected).
  static void checkFileContent(URI uri, Path name, byte[] expected,
      int readOffset, String readingUser, Configuration conf,
      boolean legacyShortCircuitFails)
      throws IOException, InterruptedException {
    // Ensure short circuit is enabled
    DistributedFileSystem fs = getFileSystem(readingUser, uri, conf);
    ClientContext getClientContext = ClientContext.getFromConf(conf);
    if (legacyShortCircuitFails) {
      assertFalse(getClientContext.getDisableLegacyBlockReaderLocal());
    }
   
    FSDataInputStream stm = fs.open(name);
    byte[] actual = new byte[expected.length-readOffset];
    stm.readFully(readOffset, actual);
    checkData(actual, readOffset, expected, "Read 2");
    stm.close();
    // Now read using a different API.
    actual = new byte[expected.length-readOffset];
    stm = fs.open(name);
    IOUtils.skipFully(stm, readOffset);
    // Read a small number of bytes first.
    int nread = stm.read(actual, 0, 3);
    nread += stm.read(actual, nread, 2);
    // Read across the chunk boundary.
    nread += stm.read(actual, nread, 517);
    checkData(actual, readOffset, expected, nread, "A few bytes");
    // Now read the rest of it.
    while (nread < actual.length) {
      int nbytes = stm.read(actual, nread, actual.length - nread);
      if (nbytes < 0) {
        throw new EOFException("End of file reached before reading fully.");
      }
      nread += nbytes;
    }
    checkData(actual, readOffset, expected, "Read 3");
   
    if (legacyShortCircuitFails) {
      assertTrue(getClientContext.getDisableLegacyBlockReaderLocal());
    }
    stm.close();
  }
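The assertion pair around the reads is the point of this helper: when legacyShortCircuitFails is set, the flag starts out false, the legacy short-circuit read is expected to fail, and that failure flips disableLegacyBlockReaderLocal to true in the shared ClientContext. A test driving this helper would first have to enable short-circuit reads in the Configuration. A minimal sketch, assuming the standard DFSConfigKeys constants (the domain-socket path and class name are illustrative only):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSConfigKeys;

public class ShortCircuitConfSketch {
  static Configuration shortCircuitConf() {
    Configuration conf = new Configuration();
    // Turn on short-circuit local reads.
    conf.setBoolean(DFSConfigKeys.DFS_CLIENT_READ_SHORTCIRCUIT_KEY, true);
    // Illustrative path; must match the DataNode's domain socket in practice.
    conf.set(DFSConfigKeys.DFS_DOMAIN_SOCKET_PATH_KEY, "/tmp/dn_socket");
    // Opt into the legacy block-reader-local path the helper asserts on.
    conf.setBoolean(
        DFSConfigKeys.DFS_CLIENT_USE_LEGACY_BLOCKREADERLOCAL, true);
    return conf;
  }
}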


  // As above, the opening line was cut off; this variant reads through the
  // ByteBuffer API, so the reconstructed signature follows the direct-read
  // helper and uses the same parameters.
  static void checkFileContentDirect(URI uri, Path name, byte[] expected,
      int readOffset, String readingUser, Configuration conf,
      boolean legacyShortCircuitFails)
      throws IOException, InterruptedException {
    // Ensure short circuit is enabled
    DistributedFileSystem fs = getFileSystem(readingUser, uri, conf);
    ClientContext clientContext = ClientContext.getFromConf(conf);
    if (legacyShortCircuitFails) {
      assertTrue(clientContext.getDisableLegacyBlockReaderLocal());
    }
   
    HdfsDataInputStream stm = (HdfsDataInputStream)fs.open(name);

    ByteBuffer actual = ByteBuffer.allocateDirect(expected.length - readOffset);

    IOUtils.skipFully(stm, readOffset);

    actual.limit(3);

    // Read a small number of bytes first.
    int nread = stm.read(actual);
    actual.limit(nread + 2);
    nread += stm.read(actual);

    // Read across chunk boundary
    actual.limit(Math.min(actual.capacity(), nread + 517));
    nread += stm.read(actual);
    checkData(arrayFromByteBuffer(actual), readOffset, expected, nread,
        "A few bytes");
    // Now read the rest of it.
    actual.limit(actual.capacity());
    while (actual.hasRemaining()) {
      int nbytes = stm.read(actual);

      if (nbytes < 0) {
        throw new EOFException("End of file reached before reading fully.");
      }
      nread += nbytes;
    }
    checkData(arrayFromByteBuffer(actual), readOffset, expected, "Read 3");
    if (legacyShortCircuitFails) {
      assertTrue(clientContext.getDisableLegacyBlockReaderLocal());
    }
    stm.close();
  }
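The arrayFromByteBuffer helper used above is not shown on this page. Given how it is called, a plausible implementation copies whatever has been written into the buffer so far into a byte[] without disturbing the caller's position and limit; a hedged sketch:

import java.nio.ByteBuffer;

class ByteBufferUtil {
  // Hypothetical stand-in for the arrayFromByteBuffer helper above: copy
  // the bytes written so far ([0, position)) into an array, working on a
  // duplicate so the caller's buffer state is left untouched.
  static byte[] arrayFromByteBuffer(ByteBuffer buf) {
    ByteBuffer dup = buf.duplicate();
    dup.flip(); // limit = old position, position = 0
    byte[] out = new byte[dup.remaining()];
    dup.get(out);
    return out;
  }
}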