Package org.apache.hadoop.io.compress

Examples of org.apache.hadoop.io.compress.Decompressor
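
The snippets below are truncated excerpts from real projects. For orientation, a minimal self-contained round trip through the pooled Decompressor API might look like this (the input path is a placeholder and error handling is reduced to the essentials):

import java.io.IOException;
import java.io.InputStream;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.io.compress.CodecPool;
import org.apache.hadoop.io.compress.CompressionCodec;
import org.apache.hadoop.io.compress.CompressionCodecFactory;
import org.apache.hadoop.io.compress.Decompressor;

public class DecompressorExample {
  public static void main(String[] args) throws IOException {
    Configuration conf = new Configuration();
    Path file = new Path(args[0]); // e.g. a .gz or .bz2 file
    FileSystem fs = file.getFileSystem(conf);

    // Pick the codec from the file suffix (.gz, .bz2, ...).
    CompressionCodec codec = new CompressionCodecFactory(conf).getCodec(file);
    if (codec == null) {
      throw new IOException("no codec found for " + file);
    }

    // Borrow a pooled decompressor and always hand it back.
    Decompressor decompressor = CodecPool.getDecompressor(codec);
    try (InputStream in = codec.createInputStream(fs.open(file), decompressor)) {
      IOUtils.copyBytes(in, System.out, conf, false);
    } finally {
      CodecPool.returnDecompressor(decompressor);
    }
  }
}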


        && isInputCompressionEmulationEnabled(conf)) {
      CompressionCodecFactory compressionCodecs =
        new CompressionCodecFactory(conf);
      CompressionCodec codec = compressionCodecs.getCodec(file);
      if (codec != null) {
        Decompressor decompressor = CodecPool.getDecompressor(codec);
        if (decompressor != null) {
          CompressionInputStream in =
            codec.createInputStream(fs.open(file), decompressor);
          //TODO Seek doesn't work with a compressed input stream.
          //     Use SplittableCompressionCodec?
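The TODO above is about seeking: a plain CompressionInputStream cannot be positioned at an arbitrary offset. A hedged sketch of the SplittableCompressionCodec route (BZip2Codec implements it; start and end stand for split boundaries and are assumptions here):

      // Sketch only: requires a codec that implements SplittableCompressionCodec.
      if (codec instanceof SplittableCompressionCodec) {
        SplitCompressionInputStream in =
            ((SplittableCompressionCodec) codec).createInputStream(
                fs.open(file), decompressor, start, end,
                SplittableCompressionCodec.READ_MODE.BYBLOCK);
        // The codec may shift the range to compression-block boundaries:
        long actualStart = in.getAdjustedStart();
        long actualEnd = in.getAdjustedEnd();
      }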


     * @throws IOException
     */
    protected void decompress(byte[] dest, int destOffset,
        InputStream bufferedBoundedStream,
        int uncompressedSize) throws IOException {
      Decompressor decompressor = null;
      try {
        decompressor = compressAlgo.getDecompressor();
        InputStream is = compressAlgo.createDecompressionStream(
            bufferedBoundedStream, decompressor, 0);

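The cut-off body above typically continues by reading exactly uncompressedSize bytes into dest and returning the Decompressor in a finally block. A sketch of that continuation (returnDecompressor is assumed to be the matching give-back method on compressAlgo):

        // Presumed continuation (sketch):
        IOUtils.readFully(is, dest, destOffset, uncompressedSize);
        is.close();
      } finally {
        if (decompressor != null) {
          compressAlgo.returnDecompressor(decompressor);
        }
      }
    }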

     * @throws IOException
     */
    private ByteBuffer decompress(final long offset, final int compressedSize,
      final int decompressedSize, final boolean pread)
    throws IOException {
      Decompressor decompressor = null;
      ByteBuffer buf = null;
      try {
        decompressor = this.compressAlgo.getDecompressor();
        // My guess is that the bounded range fis is needed to stop the
        // decompressor reading into next block -- IIRC, it just grabs a
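The comment above refers to wrapping the file stream in a bounded-range view: most decompressors read ahead greedily, so without a cap they would consume bytes belonging to the next on-disk block. An illustrative wrapper (not the actual HFile class; java.io imports assumed):

// Caps reads at `remaining` bytes so a read-ahead decompressor
// cannot run into the next block.
class BoundedRangeInputStream extends FilterInputStream {
  private long remaining;

  BoundedRangeInputStream(InputStream in, long limit) {
    super(in);
    this.remaining = limit;
  }

  @Override
  public int read() throws IOException {
    if (remaining <= 0) {
      return -1;
    }
    int b = in.read();
    if (b >= 0) {
      remaining--;
    }
    return b;
  }

  @Override
  public int read(byte[] b, int off, int len) throws IOException {
    if (remaining <= 0) {
      return -1;
    }
    int n = in.read(b, off, (int) Math.min(len, remaining));
    if (n > 0) {
      remaining -= n;
    }
    return n;
  }
}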


    if (ZlibFactory.isNativeZlibLoaded(conf)) {
      byte[] rawData;
      int tryNumber = 5;
      int BYTE_SIZE = 10 * 1024;
      Compressor zlibCompressor = ZlibFactory.getZlibCompressor(conf);
      Decompressor zlibDecompressor = ZlibFactory.getZlibDecompressor(conf);
      rawData = generate(BYTE_SIZE);
      try {
        for (int i = 0; i < tryNumber; i++) {
          compressDecompressZlib(rawData, (ZlibCompressor) zlibCompressor,
              (ZlibDecompressor) zlibDecompressor);
        }
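compressDecompressZlib is a helper defined elsewhere in the test; its likely shape is a one-shot round trip through the raw byte-array APIs, along these lines (buffer sizing and the JUnit asserts are assumptions):

  private void compressDecompressZlib(byte[] rawData,
      ZlibCompressor compressor, ZlibDecompressor decompressor)
      throws IOException {
    byte[] compressed = new byte[rawData.length * 2];
    byte[] restored = new byte[rawData.length];
    compressor.setInput(rawData, 0, rawData.length);
    compressor.finish();
    int compressedSize = compressor.compress(compressed, 0, compressed.length);
    decompressor.setInput(compressed, 0, compressedSize);
    int restoredSize = decompressor.decompress(restored, 0, restored.length);
    assertEquals(rawData.length, restoredSize);
    assertArrayEquals(rawData, restored);
    // Pooled instances are reused, so reset state between iterations.
    compressor.reset();
    decompressor.reset();
  }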

  public void testZlibCompressorDecompressorSetDictionary() {
    Configuration conf = new Configuration();
    conf.setBoolean(CommonConfigurationKeys.IO_NATIVE_LIB_AVAILABLE_KEY, true);
    if (ZlibFactory.isNativeZlibLoaded(conf)) {
      Compressor zlibCompressor = ZlibFactory.getZlibCompressor(conf);
      Decompressor zlibDecompressor = ZlibFactory.getZlibDecompressor(conf);

      checkSetDictionaryNullPointerException(zlibCompressor);
      checkSetDictionaryNullPointerException(zlibDecompressor);

      checkSetDictionaryArrayIndexOutOfBoundsException(zlibDecompressor);
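The check* helpers are not shown on this page; presumably each feeds setDictionary bad arguments and asserts the expected exception, roughly:

  private void checkSetDictionaryNullPointerException(Compressor compressor) {
    try {
      compressor.setDictionary(null, 0, 1);
      fail("expected NullPointerException for a null dictionary");
    } catch (NullPointerException expected) {
      // the native zlib wrapper rejects a null dictionary buffer
    }
  }

The Decompressor overload and the ArrayIndexOutOfBoundsException variants would follow the same pattern with an out-of-range offset or length.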

    InputStream decompStream = null;
    try {
      // invalid bytes 0 and 1: the gzip magic number must be 31, -117
      int buffSize = 1 * 1024;
      byte[] buffer = new byte[buffSize];
      Decompressor decompressor = new BuiltInGzipDecompressor();
      DataInputBuffer gzbuf = new DataInputBuffer();
      decompStream = new DecompressorStream(gzbuf, decompressor);
      gzbuf.reset(new byte[] { 0, 0, 1, 1, 1, 1, 11, 1, 1, 1, 1 }, 11);
      decompStream.read(buffer);
    } catch (IOException ioex) {
      // expected
    } catch (Exception ex) {
      fail("invalid 0 and 1 byte in gzip stream" + ex);
    }

    // invalid byte 2: the compression method must be 8 (deflate)
    try {
      int buffSize = 1 * 1024;
      byte[] buffer = new byte[buffSize];
      Decompressor decompressor = new BuiltInGzipDecompressor();
      DataInputBuffer gzbuf = new DataInputBuffer();
      decompStream = new DecompressorStream(gzbuf, decompressor);
      gzbuf.reset(new byte[] { 31, -117, 7, 1, 1, 1, 1, 11, 1, 1, 1, 1 }, 12);
      decompStream.read(buffer);
    } catch (IOException ioex) {
      // expected
    } catch (Exception ex) {
      fail("invalid 2 byte in gzip stream" + ex);
    }

    // invalid byte 3 (flags): -32 sets reserved flag bits, which must be zero
    try {
      int buffSize = 1 * 1024;
      byte[] buffer = new byte[buffSize];
      Decompressor decompressor = new BuiltInGzipDecompressor();
      DataInputBuffer gzbuf = new DataInputBuffer();
      decompStream = new DecompressorStream(gzbuf, decompressor);
      gzbuf.reset(new byte[] { 31, -117, 8, -32, 1, 1, 1, 11, 1, 1, 1, 1 }, 12);
      decompStream.read(buffer);
    } catch (IOException ioex) {
      // expected
    } catch (Exception ex) {
      fail("invalid 3 byte in gzip stream" + ex);
    }
    // byte 3 = 4 sets the FEXTRA flag; the extra-field bytes that follow are invalid
    try {
      int buffSize = 1 * 1024;
      byte[] buffer = new byte[buffSize];
      Decompressor decompressor = new BuiltInGzipDecompressor();
      DataInputBuffer gzbuf = new DataInputBuffer();
      decompStream = new DecompressorStream(gzbuf, decompressor);
      gzbuf.reset(new byte[] { 31, -117, 8, 4, 1, 1, 1, 11, 1, 1, 1, 1 }, 12);
      decompStream.read(buffer);
    } catch (IOException ioex) {
      // expected
    }
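Each negative case above corrupts one field of the fixed 10-byte gzip header: bytes 0-1 are the magic number (31, -117), byte 2 is the compression method (must be 8, deflate), and byte 3 is the flags field (-32 sets reserved bits, 4 sets FEXTRA). A positive counterpart that feeds BuiltInGzipDecompressor a well-formed stream from java.util.zip could look like this (GZIPOutputStream and the usual java.io/nio imports assumed):

    byte[] payload = "hello gzip".getBytes(StandardCharsets.UTF_8);
    ByteArrayOutputStream bos = new ByteArrayOutputStream();
    try (GZIPOutputStream gzos = new GZIPOutputStream(bos)) {
      gzos.write(payload);
    }
    byte[] gz = bos.toByteArray();

    Decompressor decompressor = new BuiltInGzipDecompressor();
    DataInputBuffer gzbuf = new DataInputBuffer();
    gzbuf.reset(gz, gz.length);
    try (InputStream validStream = new DecompressorStream(gzbuf, decompressor)) {
      byte[] out = new byte[payload.length];
      IOUtils.readFully(validStream, out, 0, out.length);
      assertArrayEquals(payload, out);
    }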


        CompressionCodec codec = ccf.getCodec(path);

        if (codec == null) {
            return new BufferedReader(new FileReader(file));
        } else {
            Decompressor decompressor = CodecPool.getDecompressor(codec);
            FileInputStream fis = new FileInputStream(file);
            CompressionInputStream cis = codec.createInputStream(fis, decompressor);
            BufferedReader br = new BufferedReaderExt(new InputStreamReader(cis), decompressor);
            return br;
        }
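BufferedReaderExt is not a Hadoop class; it is presumably a thin subclass that ties the pooled Decompressor's lifetime to the reader, along these lines:

// Assumed shape: returning the decompressor on close() prevents pool leaks.
class BufferedReaderExt extends BufferedReader {
  private final Decompressor decompressor;

  BufferedReaderExt(Reader in, Decompressor decompressor) {
    super(in);
    this.decompressor = decompressor;
  }

  @Override
  public void close() throws IOException {
    try {
      super.close();
    } finally {
      CodecPool.returnDecompressor(decompressor);
    }
  }
}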

    input = checksumIn;

    // Are map-outputs compressed?
    if (codec != null) {
      Decompressor decompressor = CodecPool.getDecompressor(codec);
      decompressor.reset();
      input = codec.createInputStream(input, decompressor);
    }
    // Copy map-output into an in-memory buffer
    byte[] shuffleData = fetchedInput.getBytes();
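The elided remainder presumably drains the stream into the in-memory buffer and hands the Decompressor back to the pool; a sketch (the real code would have to declare decompressor outside the if block to return it here):

    IOUtils.readFully(input, shuffleData, 0, shuffleData.length);
    // Hand the pooled decompressor back once the map output is in memory.
    if (decompressor != null) {
      CodecPool.returnDecompressor(decompressor);
    }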
