Examples of releaseBuffer()

releaseBuffer(ByteBuffer) hands a buffer obtained from an enhanced ("zero-copy") read back to the stream so it can be unmapped or returned to its pool; the caller must not touch the buffer after releasing it.

Examples of org.apache.hadoop.fs.FSDataInputStream.releaseBuffer()

      DFSTestUtil.waitReplication(fs, TEST_PATH, (short)1);

      fsIn = fs.open(TEST_PATH);
      // Zero-copy read of a single byte, skipping checksum verification.
      buf1 = fsIn.read(null, 1, EnumSet.of(ReadOption.SKIP_CHECKSUMS));
      Assert.assertEquals(1, buf1.remaining());
      // Return the buffer to the stream as soon as we are done with it.
      fsIn.releaseBuffer(buf1);
      buf1 = null;
      // Seek close to the 2 GB mark and read the last 7 bytes of the file.
      fsIn.seek(2147483640L);
      buf1 = fsIn.read(null, 1024, EnumSet.of(ReadOption.SKIP_CHECKSUMS));
      Assert.assertEquals(7, buf1.remaining());
      Assert.assertEquals(Integer.MAX_VALUE, buf1.limit());

Examples of org.apache.hadoop.fs.FSDataInputStream.releaseBuffer()

      buf1 = null;
      // Seek close to the 2 GB mark and read the last 7 bytes of the file.
      fsIn.seek(2147483640L);
      buf1 = fsIn.read(null, 1024, EnumSet.of(ReadOption.SKIP_CHECKSUMS));
      Assert.assertEquals(7, buf1.remaining());
      Assert.assertEquals(Integer.MAX_VALUE, buf1.limit());
      fsIn.releaseBuffer(buf1);
      buf1 = null;
      // The stream is now positioned at the end of the (2 GB - 1 byte) file.
      Assert.assertEquals(2147483647L, fsIn.getPos());
      try {
        buf1 = fsIn.read(null, 1024,
            EnumSet.of(ReadOption.SKIP_CHECKSUMS));
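A consolidated sketch of the pattern shown in the fragments above: open a file, perform a zero-copy read with checksums skipped, then release the buffer in a finally block. The file path, the 4096-byte request size, and the use of ElasticByteBufferPool as the fallback pool are illustrative assumptions; read(ByteBufferPool, int, EnumSet<ReadOption>) and releaseBuffer(ByteBuffer) are the FSDataInputStream calls exercised in the snippets.

import java.nio.ByteBuffer;
import java.util.EnumSet;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.ReadOption;
import org.apache.hadoop.io.ElasticByteBufferPool;

public class ZeroCopyReadSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    FileSystem fs = FileSystem.get(conf);
    Path path = new Path("/tmp/zero-copy-demo");   // hypothetical test file

    FSDataInputStream in = fs.open(path);
    ByteBuffer buf = null;
    try {
      // Request up to 4096 bytes; the stream may return fewer (for example at
      // a block boundary) or null at end of file. Supplying a pool (instead of
      // the null used in the test fragments) lets the read fall back to a
      // normal copying read when zero-copy is not available.
      buf = in.read(new ElasticByteBufferPool(), 4096,
          EnumSet.of(ReadOption.SKIP_CHECKSUMS));
      if (buf != null) {
        System.out.println("read " + buf.remaining() + " bytes");
      }
    } finally {
      // Buffers from the enhanced read must be handed back to the stream
      // before it is closed.
      if (buf != null) {
        in.releaseBuffer(buf);
      }
      in.close();
    }
  }
}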

Examples of org.apache.hadoop.fs.FSDataInputStream.releaseBuffer()

      Assert.assertEquals(2147484672L, fsIn2.getPos());
      fsIn2.releaseBuffer(buf2);
      buf2 = null;
    } finally {
      // Release any buffers that are still outstanding before closing the streams.
      if (buf1 != null) {
        fsIn.releaseBuffer(buf1);
      }
      if (buf2 != null) {
        fsIn2.releaseBuffer(buf2);
      }
      IOUtils.cleanup(null, fsIn, fsIn2);

Examples of org.apache.hadoop.hdfs.client.HdfsDataInputStream.releaseBuffer()

          dfsIn.getReadStatistics().getTotalBytesRead());
      // All 4096 bytes were served through the zero-copy path.
      Assert.assertEquals(4096,
          dfsIn.getReadStatistics().getTotalZeroCopyBytesRead());
      Assert.assertArrayEquals(Arrays.copyOfRange(original, 0, 4096),
          byteBufferToArray(result));
      dfsIn.releaseBuffer(result);

      // Try to read 4097, but only get 4096 because of the block size.
      result =
          dfsIn.read(null, 4097, EnumSet.of(ReadOption.SKIP_CHECKSUMS));
      Assert.assertEquals(4096, result.remaining());

Examples of org.apache.hadoop.hdfs.client.HdfsDataInputStream.releaseBuffer()

      // The 4097-byte request is capped at the 4096-byte block boundary.
      result =
          dfsIn.read(null, 4097, EnumSet.of(ReadOption.SKIP_CHECKSUMS));
      Assert.assertEquals(4096, result.remaining());
      Assert.assertArrayEquals(Arrays.copyOfRange(original, 4096, 8192),
          byteBufferToArray(result));
      dfsIn.releaseBuffer(result);
    } finally {
      // Tear down: the stream, the file system, and the test cluster.
      if (fsIn != null) fsIn.close();
      if (fs != null) fs.close();
      if (cluster != null) cluster.shutdown();
    }
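Pieced together from the fragments above, a minimal sketch of the same flow: a zero-copy read from an HdfsDataInputStream, a check of the stream's zero-copy read statistics, and a release of the buffer. The file path and the 4096-byte request size are illustrative assumptions; getReadStatistics(), read(ByteBufferPool, int, EnumSet<ReadOption>), and releaseBuffer(ByteBuffer) are the calls shown in the snippets.

import java.nio.ByteBuffer;
import java.util.EnumSet;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.ReadOption;
import org.apache.hadoop.hdfs.client.HdfsDataInputStream;

public class ZeroCopyStatsSketch {
  public static void main(String[] args) throws Exception {
    FileSystem fs = FileSystem.get(new Configuration());
    Path path = new Path("/tmp/zero-copy-stats-demo");  // hypothetical file

    FSDataInputStream fsIn = fs.open(path);
    // On HDFS the stream can be cast to HdfsDataInputStream to reach the
    // per-stream read statistics.
    HdfsDataInputStream dfsIn = (HdfsDataInputStream) fsIn;
    ByteBuffer result = null;
    try {
      // A null pool means there is no fallback buffer, so the read is expected
      // to fail if the zero-copy (mmap) path is unavailable.
      result = dfsIn.read(null, 4096,
          EnumSet.of(ReadOption.SKIP_CHECKSUMS));
      System.out.println("bytes read:           "
          + dfsIn.getReadStatistics().getTotalBytesRead());
      System.out.println("zero-copy bytes read: "
          + dfsIn.getReadStatistics().getTotalZeroCopyBytesRead());
    } finally {
      // Release the buffer before closing the stream.
      if (result != null) {
        dfsIn.releaseBuffer(result);
      }
      dfsIn.close();
    }
  }
}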


Examples of org.apache.qpid.transport.codec.BBDecoder.releaseBuffer()

            break;
        default:
            throw new IllegalStateException("unknown frame type: " + frame.getType());
        }

        // Drop the decoder's reference to the frame's buffer once the frame
        // has been dispatched.
        dec.releaseBuffer();
    }

    private void setIncompleteCommand(int channelId, Method incomplete)
    {
        if ((channelId & ARRAY_SIZE) == channelId)
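In the Qpid codec the decoder is pointed at a frame's ByteBuffer, fields are read from it, and releaseBuffer() discards the decoder's reference once the frame has been handled. A minimal sketch, assuming BBDecoder#init(ByteBuffer) and Decoder#readUint32() from the Qpid 0.x transport codec; the hand-built 4-byte frame is purely illustrative.

import java.nio.ByteBuffer;

import org.apache.qpid.transport.codec.BBDecoder;

public class DecoderReleaseSketch {
    public static void main(String[] args) {
        // A 4-byte network-order unsigned integer standing in for a frame body.
        ByteBuffer frame = ByteBuffer.allocate(4);
        frame.putInt(42);
        frame.flip();

        BBDecoder dec = new BBDecoder();
        dec.init(frame);                 // point the decoder at the frame's buffer
        long value = dec.readUint32();   // decode a uint32 field
        System.out.println("decoded: " + value);

        // Release the buffer once the frame has been consumed, as in the
        // dispatch loop above.
        dec.releaseBuffer();
    }
}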