Examples of Decompressor


Examples of org.apache.hadoop.io.compress.Decompressor

     * @throws IOException
     */
    private ByteBuffer decompress(final long offset, final int compressedSize,
      final int decompressedSize)
    throws IOException {
      Decompressor decompressor = null;
      ByteBuffer buf = null;
      try {
        decompressor = this.compressAlgo.getDecompressor();
        // My guess is that the bounded range fis is needed to stop the
        // decompressor reading into next block -- IIRC, it just grabs a
View Full Code Here
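
The excerpt above borrows a Decompressor from an HBase Compression.Algorithm and relies on a bounded range stream so the codec cannot read past the end of the block. Below is a minimal sketch of the same borrow/decompress/return pattern written directly against the generic Hadoop codec API; the class name, parameter names, and the use of a byte[] source are assumptions for illustration, not part of the original reader.

import java.io.ByteArrayInputStream;
import java.io.IOException;
import java.io.InputStream;
import java.nio.ByteBuffer;

import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.io.compress.CodecPool;
import org.apache.hadoop.io.compress.CompressionCodec;
import org.apache.hadoop.io.compress.Decompressor;

public class BlockDecompressExample {
  /**
   * Hypothetical block decompressor: the ByteArrayInputStream is bounded to
   * compressedSize, which plays the role of the bounded range stream in the
   * excerpt above and keeps the codec from reading into the next block.
   */
  static ByteBuffer decompressBlock(CompressionCodec codec, byte[] compressed,
      int offset, int compressedSize, int decompressedSize) throws IOException {
    Decompressor decompressor = CodecPool.getDecompressor(codec);
    try (InputStream in = codec.createInputStream(
        new ByteArrayInputStream(compressed, offset, compressedSize), decompressor)) {
      byte[] dest = new byte[decompressedSize];
      IOUtils.readFully(in, dest, 0, decompressedSize);
      return ByteBuffer.wrap(dest);
    } finally {
      if (decompressor != null) {
        CodecPool.returnDecompressor(decompressor);  // always hand it back to the pool
      }
    }
  }
}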

Examples of org.apache.hadoop.io.compress.Decompressor

    FileSystem fs = FileSystem.get(conf);
    InputStream is = fs.open(f);
    CompressionCodecFactory ccf = new CompressionCodecFactory(conf);
    CompressionCodec codec = ccf.getCodec(f);
    LOG.info("gzip check codec is " + codec);
    Decompressor decompressor = CodecPool.getDecompressor(codec);
    if (null == decompressor) {
      LOG.info("Verifying gzip sanity with null decompressor");
    } else {
      LOG.info("Verifying gzip sanity with decompressor: "
          + decompressor.toString());
    }
    is = codec.createInputStream(is, decompressor);
    BufferedReader r = new BufferedReader(new InputStreamReader(is));
    int numLines = 0;
    while (true) {
View Full Code Here
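
The gzip sanity check above resolves the codec from the file name with CompressionCodecFactory, borrows a Decompressor from CodecPool (tolerating null on the built-in gzip path), and counts the lines it can read back. A self-contained sketch of the same flow follows; the class and method names are assumptions, and unlike the test it simply returns the line count.

import java.io.BufferedReader;
import java.io.IOException;
import java.io.InputStream;
import java.io.InputStreamReader;
import java.nio.charset.StandardCharsets;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.compress.CodecPool;
import org.apache.hadoop.io.compress.CompressionCodec;
import org.apache.hadoop.io.compress.CompressionCodecFactory;
import org.apache.hadoop.io.compress.Decompressor;

public class CompressedLineCount {
  /** Count lines in a (possibly) compressed file; falls back to the raw stream if no codec matches. */
  static int countLines(Configuration conf, Path file) throws IOException {
    FileSystem fs = file.getFileSystem(conf);
    CompressionCodec codec = new CompressionCodecFactory(conf).getCodec(file);
    InputStream in = fs.open(file);
    Decompressor decompressor = (codec == null) ? null : CodecPool.getDecompressor(codec);
    try {
      if (codec != null) {
        // As in the excerpt above, a null decompressor is passed through to the codec.
        in = codec.createInputStream(in, decompressor);
      }
      BufferedReader reader =
          new BufferedReader(new InputStreamReader(in, StandardCharsets.UTF_8));
      int numLines = 0;
      while (reader.readLine() != null) {
        numLines++;
      }
      return numLines;
    } finally {
      in.close();
      if (decompressor != null) {
        CodecPool.returnDecompressor(decompressor);
      }
    }
  }
}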

Examples of org.apache.hadoop.io.compress.Decompressor

    // resources if the stream is not closed properly after we let it out.
    InputStream is = null;
    if (compressor != null) {
      // GZIPCodec fails w/ NPE if no configuration.
      if (compressor instanceof Configurable) ((Configurable)compressor).setConf(this.conf);
      Decompressor poolDecompressor = CodecPool.getDecompressor(compressor);
      CompressionInputStream cis =
        compressor.createInputStream(new ByteArrayInputStream(cellBlock, offset, length),
        poolDecompressor);
      try {
        // TODO: This is ugly.  The buffer will be resized on us if we guess wrong.
View Full Code Here
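
This RPC-side excerpt decompresses a slice of an in-memory cell block: it configures the codec when it is Configurable (GzipCodec fails with an NPE without a Configuration), then streams through a pooled Decompressor. A trimmed sketch of that pattern follows; the method and parameter names are assumptions, and it buffers the whole result rather than guessing an output size as the original does.

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.io.InputStream;

import org.apache.hadoop.conf.Configurable;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.io.compress.CodecPool;
import org.apache.hadoop.io.compress.CompressionCodec;
import org.apache.hadoop.io.compress.Decompressor;

public class CellBlockDecompressExample {
  /** Decompress the [offset, offset + length) slice of an in-memory buffer. */
  static byte[] decompressSlice(CompressionCodec codec, Configuration conf,
      byte[] cellBlock, int offset, int length) throws IOException {
    // GzipCodec throws NPE on use if it was never handed a Configuration.
    if (codec instanceof Configurable) {
      ((Configurable) codec).setConf(conf);
    }
    Decompressor decompressor = CodecPool.getDecompressor(codec);
    try (InputStream in = codec.createInputStream(
        new ByteArrayInputStream(cellBlock, offset, length), decompressor)) {
      ByteArrayOutputStream out = new ByteArrayOutputStream();
      byte[] chunk = new byte[4096];
      int n;
      while ((n = in.read(chunk)) != -1) {
        out.write(chunk, 0, n);
      }
      return out.toByteArray();
    } finally {
      if (decompressor != null) {
        CodecPool.returnDecompressor(decompressor);
      }
    }
  }
}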

Examples of org.apache.hadoop.io.compress.Decompressor

     * @throws IOException
     */
    protected void decompress(byte[] dest, int destOffset,
        InputStream bufferedBoundedStream, int compressedSize,
        int uncompressedSize) throws IOException {
      Decompressor decompressor = null;
      try {
        decompressor = compressAlgo.getDecompressor();
        InputStream is = compressAlgo.createDecompressionStream(
            bufferedBoundedStream, decompressor, 0);

View Full Code Here
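
Here the reader decompresses straight into a caller-supplied byte[] at a given offset. The sketch below shows the same contract written against the generic codec API instead of HBase's Compression.Algorithm wrapper; the space check mirrors the one visible in the next excerpt, and the class and method names are assumptions.

import java.io.IOException;
import java.io.InputStream;

import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.io.compress.CodecPool;
import org.apache.hadoop.io.compress.CompressionCodec;
import org.apache.hadoop.io.compress.Decompressor;

public class DecompressIntoBuffer {
  /**
   * Decompress exactly uncompressedSize bytes from a bounded compressed stream
   * into dest starting at destOffset, using a pooled Decompressor.
   */
  static void decompress(CompressionCodec codec, InputStream boundedCompressedStream,
      byte[] dest, int destOffset, int uncompressedSize) throws IOException {
    if (dest.length - destOffset < uncompressedSize) {
      throw new IllegalArgumentException(
          "Output buffer does not have enough space to hold " + uncompressedSize
              + " decompressed bytes, available: " + (dest.length - destOffset));
    }
    Decompressor decompressor = CodecPool.getDecompressor(codec);
    // Closing the decompression stream also closes the wrapped bounded stream.
    try (InputStream in = codec.createInputStream(boundedCompressedStream, decompressor)) {
      IOUtils.readFully(in, dest, destOffset, uncompressedSize);
    } finally {
      if (decompressor != null) {
        CodecPool.returnDecompressor(decompressor);
      }
    }
  }
}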

Examples of org.apache.hadoop.io.compress.Decompressor

          "Output buffer does not have enough space to hold "
              + uncompressedSize + " decompressed bytes, available: "
              + (dest.length - destOffset));
    }

    Decompressor decompressor = null;
    try {
      decompressor = compressAlgo.getDecompressor();
      InputStream is = compressAlgo.createDecompressionStream(
          bufferedBoundedStream, decompressor, 0);
View Full Code Here

Examples of org.apache.hadoop.io.compress.Decompressor

     * @throws IOException
     */
    private ByteBuffer decompress(final long offset, final int compressedSize,
      final int decompressedSize, final boolean pread)
    throws IOException {
      Decompressor decompressor = null;
      ByteBuffer buf = null;
      try {
        decompressor = this.compressAlgo.getDecompressor();
        // My guess is that the bounded range fis is needed to stop the
        // decompressor reading into next block -- IIRC, it just grabs a
View Full Code Here

Examples of org.apache.hadoop.io.compress.Decompressor

      this.end = in.getPos() + length;
      init(tempReader);
    }
   
    private Decompressor getPooledOrNewDecompressor() {
      Decompressor decompressor = null;
      decompressor = decompressorPool.getCodec(codec.getDecompressorType());
      if (decompressor == null) {
        decompressor = codec.createDecompressor();
      }
      return decompressor;
View Full Code Here
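
This reader keeps its own pool keyed by codec.getDecompressorType() and only calls codec.createDecompressor() on a miss, which is the same fall-back the stock CodecPool performs (see the CodecPool.getDecompressor excerpt further down). A minimal sketch of such a private pool is below; the class name and the synchronization policy are assumptions for illustration.

import java.util.ArrayDeque;
import java.util.Deque;
import java.util.HashMap;
import java.util.Map;

import org.apache.hadoop.io.compress.CompressionCodec;
import org.apache.hadoop.io.compress.Decompressor;

/** Hypothetical miniature pool keyed by the codec's Decompressor class. */
public class SimpleDecompressorPool {
  private final Map<Class<?>, Deque<Decompressor>> pool = new HashMap<>();

  public synchronized Decompressor getPooledOrNewDecompressor(CompressionCodec codec) {
    Deque<Decompressor> free = pool.get(codec.getDecompressorType());
    Decompressor decompressor = (free == null) ? null : free.poll();
    if (decompressor == null) {
      decompressor = codec.createDecompressor();   // pool miss: make a fresh one
    }
    return decompressor;
  }

  public synchronized void returnDecompressor(CompressionCodec codec, Decompressor decompressor) {
    if (decompressor == null) {
      return;
    }
    decompressor.reset();                          // clear state before reuse
    pool.computeIfAbsent(codec.getDecompressorType(), k -> new ArrayDeque<>())
        .offer(decompressor);
  }
}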

Examples of org.apache.hadoop.io.compress.Decompressor

          "Output buffer does not have enough space to hold "
              + uncompressedSize + " decompressed bytes, available: "
              + (dest.length - destOffset));
    }

    Decompressor decompressor = null;
    try {
      decompressor = compressAlgo.getDecompressor();
      InputStream is = compressAlgo.createDecompressionStream(
          bufferedBoundedStream, decompressor, 0);
View Full Code Here

Examples of org.apache.hadoop.io.compress.Decompressor

   *              <code>Decompressor</code>
   * @return <code>Decompressor</code> for the given
   *         <code>CompressionCodec</code> from the pool or a new one
   */
  public static Decompressor getDecompressor(CompressionCodec codec) {
    Decompressor decompressor = (Decompressor) borrow(decompressorPool, codec.getDecompressorType());
    if (decompressor == null) {
      decompressor = codec.createDecompressor();
      LOG.info("Got brand-new decompressor");
    } else {
      LOG.debug("Got recycled decompressor");
View Full Code Here
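
CodecPool.getDecompressor either recycles a pooled instance or creates a brand-new one from the codec, and callers are expected to hand it back with CodecPool.returnDecompressor. A runnable round-trip sketch of that lifecycle using DefaultCodec is below; the choice of codec and the sample data are assumptions.

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
import java.nio.charset.StandardCharsets;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.io.compress.CodecPool;
import org.apache.hadoop.io.compress.Compressor;
import org.apache.hadoop.io.compress.Decompressor;
import org.apache.hadoop.io.compress.DefaultCodec;
import org.apache.hadoop.util.ReflectionUtils;

public class CodecPoolRoundTrip {
  public static void main(String[] args) throws IOException {
    Configuration conf = new Configuration();
    DefaultCodec codec = ReflectionUtils.newInstance(DefaultCodec.class, conf);
    byte[] original = "round trip through CodecPool".getBytes(StandardCharsets.UTF_8);

    // Borrow a Compressor, compress, and return it to the pool.
    Compressor compressor = CodecPool.getCompressor(codec);
    ByteArrayOutputStream compressed = new ByteArrayOutputStream();
    try (OutputStream out = codec.createOutputStream(compressed, compressor)) {
      out.write(original);
    } finally {
      CodecPool.returnCompressor(compressor);
    }

    // Borrow a Decompressor, decompress, and return it to the pool.
    Decompressor decompressor = CodecPool.getDecompressor(codec);
    byte[] restored = new byte[original.length];
    try (InputStream in = codec.createInputStream(
        new ByteArrayInputStream(compressed.toByteArray()), decompressor)) {
      IOUtils.readFully(in, restored, 0, restored.length);
    } finally {
      CodecPool.returnDecompressor(decompressor);
    }

    System.out.println(new String(restored, StandardCharsets.UTF_8));
  }
}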

Examples of org.apache.hadoop.io.compress.Decompressor

      boolean isCompressed = IFile.Reader.isCompressedFlagEnabled(in);
      IFileInputStream checksumIn = new IFileInputStream(in,
          compressedLength - IFile.HEADER.length, ifileReadAhead,
          ifileReadAheadLength);
      in = checksumIn;
      Decompressor decompressor = null;
      if (isCompressed && codec != null) {
        decompressor = CodecPool.getDecompressor(codec);
        if (decompressor != null) {
          decompressor.reset();
          in = codec.createInputStream(checksumIn, decompressor);
        } else {
          LOG.warn("Could not obtain decompressor from CodecPool");
          in = checksumIn;
        }
      }
      try {
        IOUtils.readFully(in, buffer, 0, buffer.length - IFile.HEADER.length);
      } catch (IOException ioe) {
        IOUtils.cleanup(LOG, in);
        throw ioe;
      } finally {
        if (decompressor != null) {
          decompressor.reset();
          CodecPool.returnDecompressor(decompressor);
        }
      }
    }
View Full Code Here