Examples of decompress()


Examples of org.apache.hadoop.hive.shims.HadoopShims.DirectDecompressorShim.decompress()

  @Override
  public void directDecompress(ByteBuffer in, ByteBuffer out)
      throws IOException {
    // Resolve the Hadoop-version-specific direct decompressor through the shim layer.
    DirectDecompressorShim decompressShim = ShimLoader.getHadoopShims()
        .getDirectDecompressor(DirectCompressionType.SNAPPY);
    decompressShim.decompress(in, out);
    out.flip(); // flip the output buffer so the caller can read the decompressed bytes
  }
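
A minimal, self-contained sketch of the same call with an explicit null check, since older Hadoop shims may not provide a direct decompressor. The class name and the fallback behaviour are illustrative assumptions; the nested-type imports assume DirectDecompressorShim and DirectCompressionType live on HadoopShims as in the package name above, and both buffers are typically direct ByteBuffers.

  import java.io.IOException;
  import java.nio.ByteBuffer;

  import org.apache.hadoop.hive.shims.HadoopShims.DirectCompressionType;
  import org.apache.hadoop.hive.shims.HadoopShims.DirectDecompressorShim;
  import org.apache.hadoop.hive.shims.ShimLoader;

  // Hypothetical helper class, shown only so the sketch compiles on its own.
  public class ShimSnappyDecompress {
    static void decompress(ByteBuffer in, ByteBuffer out) throws IOException {
      DirectDecompressorShim shim = ShimLoader.getHadoopShims()
          .getDirectDecompressor(DirectCompressionType.SNAPPY);
      if (shim == null) {
        // Older Hadoop versions do not expose a direct decompressor through the shims.
        throw new IOException("direct SNAPPY decompression is not supported by this Hadoop version");
      }
      shim.decompress(in, out);
      out.flip(); // make the decompressed bytes readable
    }
  }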

Examples of org.apache.hadoop.io.compress.DirectDecompressor.decompress()

    // d is the DirectDecompressionCodec resolved earlier (null if unsupported).
    if (d != null) {
      decompr = d.createDirectDecompressor();
    }

    if (d != null && decompr != null) {
      decompr.decompress(inpBuffer, outBuffer);
    } else {
      logger.warn("This Hadoop implementation does not support a " + codecName +
          " direct decompression codec interface. " +
          "Direct decompression is available only on *nix systems with Hadoop 2.3 or greater. " +
          "Read operations will be a little slower. ");
    }
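
For context, a hedged sketch of how a DirectDecompressor is usually obtained: only codecs that implement DirectDecompressionCodec (Hadoop 2.3+) expose createDirectDecompressor(). The class name, the SnappyCodec choice and the exception-based fallback are assumptions for illustration; both buffers should be direct for the native path.

  import java.io.IOException;
  import java.nio.ByteBuffer;

  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.io.compress.CompressionCodec;
  import org.apache.hadoop.io.compress.DirectDecompressionCodec;
  import org.apache.hadoop.io.compress.DirectDecompressor;
  import org.apache.hadoop.io.compress.SnappyCodec;

  // Hypothetical helper class for the sketch.
  public class DirectDecompressSketch {
    static void decompress(ByteBuffer compressed, ByteBuffer uncompressed) throws IOException {
      CompressionCodec codec = new SnappyCodec();
      ((SnappyCodec) codec).setConf(new Configuration());

      DirectDecompressor d = null;
      if (codec instanceof DirectDecompressionCodec) {
        d = ((DirectDecompressionCodec) codec).createDirectDecompressor();
      }
      if (d == null) {
        // e.g. native library missing or pre-2.3 Hadoop: fall back to the stream-based API.
        throw new IOException("direct decompression not available");
      }
      d.decompress(compressed, uncompressed); // ByteBuffer-to-ByteBuffer, native path
    }
  }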

Examples of org.apache.hadoop.io.compress.lz4.Lz4Decompressor.decompress()

  public void testDecompressorCompressNullPointerException() {
    try {
      Lz4Decompressor decompressor = new Lz4Decompressor();
      byte[] bytes = generate(1024 * 6);
      decompressor.setInput(bytes, 0, bytes.length);
      decompressor.decompress(null, 0, 0);
      fail("testDecompressorCompressNullPointerException error !!!");
    } catch (NullPointerException ex) {
      // expected
    } catch (Exception e) {
      fail("testDecompressorCompressNullPointerException ex error !!!");
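
The test above only exercises the null-argument contract. A minimal round-trip sketch with the same raw compressor/decompressor pair, assuming the native Hadoop LZ4 support is available at runtime; the class name, fill pattern and buffer sizes are illustrative.

  import java.util.Arrays;

  import org.apache.hadoop.io.compress.lz4.Lz4Compressor;
  import org.apache.hadoop.io.compress.lz4.Lz4Decompressor;

  // Hypothetical example class; requires the native Hadoop LZ4 library at runtime.
  public class Lz4RoundTrip {
    public static void main(String[] args) throws Exception {
      byte[] input = new byte[1024 * 6];
      Arrays.fill(input, (byte) 'x');

      // Compress the whole input in one block.
      Lz4Compressor compressor = new Lz4Compressor();
      compressor.setInput(input, 0, input.length);
      compressor.finish();
      byte[] compressed = new byte[input.length * 2];
      int compressedLen = compressor.compress(compressed, 0, compressed.length);

      // Decompress back into a buffer of the original size.
      Lz4Decompressor decompressor = new Lz4Decompressor();
      decompressor.setInput(compressed, 0, compressedLen);
      byte[] restored = new byte[input.length];
      decompressor.decompress(restored, 0, restored.length);

      if (!Arrays.equals(input, restored)) {
        throw new AssertionError("LZ4 round trip mismatch");
      }
    }
  }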

Examples of org.apache.hadoop.io.compress.snappy.SnappyDecompressor.SnappyDirectDecompressor.decompress()

    ByteBuffer expected = ByteBuffer.wrap(rawData);

    outBuf.clear();
    while (!decompressor.finished()) {
      decompressor.decompress(inBuf, outBuf);
      if (outBuf.remaining() == 0) {
        // The output buffer is full: drain it and compare against the raw data.
        outBuf.flip();
        while (outBuf.remaining() > 0) {
          assertEquals(expected.get(), outBuf.get());
        }
        outBuf.clear(); // reuse the buffer for the next round of output
      }
    }

Examples of org.apache.hadoop.io.compress.zlib.ZlibDecompressor.ZlibDirectDecompressor.decompress()

    ByteBuffer expected = ByteBuffer.wrap(rawData);

    outBuf.clear();
    while (!decompressor.finished()) {
      decompressor.decompress(inBuf, outBuf);
      if (outBuf.remaining() == 0) {
        // The output buffer is full: drain it and compare against the raw data.
        outBuf.flip();
        while (outBuf.remaining() > 0) {
          assertEquals(expected.get(), outBuf.get());
        }
        outBuf.clear(); // reuse the buffer for the next round of output
      }
    }
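
Both direct decompressors above are driven by the same ByteBuffer loop. A self-contained sketch of that loop for the zlib variant; the helper class, buffer sizes, and the assumption that the input is a complete zlib stream (for example, produced by java.util.zip.Deflater) are illustrative, and the same loop works with SnappyDecompressor.SnappyDirectDecompressor.

  import java.io.IOException;
  import java.nio.ByteBuffer;

  import org.apache.hadoop.io.compress.zlib.ZlibDecompressor.ZlibDirectDecompressor;

  // Hypothetical helper class; both buffers must be direct for the native zlib path.
  public class ZlibDirectLoop {
    static byte[] decompressAll(byte[] compressedZlibStream, int uncompressedSize) throws IOException {
      ZlibDirectDecompressor decompressor = new ZlibDirectDecompressor();
      ByteBuffer inBuf = ByteBuffer.allocateDirect(compressedZlibStream.length);
      ByteBuffer outBuf = ByteBuffer.allocateDirect(8 * 1024);
      ByteBuffer result = ByteBuffer.allocate(uncompressedSize);

      inBuf.put(compressedZlibStream);
      inBuf.flip(); // make the compressed bytes readable by the decompressor
      while (!decompressor.finished()) {
        decompressor.decompress(inBuf, outBuf);
        outBuf.flip();      // switch the output buffer to read mode
        result.put(outBuf); // drain the chunk that was just produced
        outBuf.clear();     // ready for the next round
      }
      return result.array();
    }
  }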

Examples of org.apache.lucene.codecs.compressing.CompressingStoredFieldsReader.ChunkIterator.decompress()

              docID = nextLiveDoc(it.docBase + it.chunkDocs, liveDocs, maxDoc);
              docCount += it.chunkDocs;
              mergeState.checkAbort.work(300 * it.chunkDocs);
            } else {
              // decompress
              it.decompress();
              if (startOffsets[it.chunkDocs - 1] + it.lengths[it.chunkDocs - 1] != it.bytes.length) {
                throw new CorruptIndexException("Corrupted: expected chunk size="
                    + (startOffsets[it.chunkDocs - 1] + it.lengths[it.chunkDocs - 1])
                    + ", got " + it.bytes.length);
              }
              // copy non-deleted docs
              for (; docID < it.docBase + it.chunkDocs; docID = nextLiveDoc(docID + 1, liveDocs, maxDoc)) {

Examples of org.apache.lucene.codecs.compressing.CompressingStoredFieldsReader.ChunkIterator.decompress()

            for (int i = 1; i < it.chunkDocs; ++i) {
              startOffsets[i] = startOffsets[i - 1] + it.lengths[i - 1];
            }

            // decompress
            it.decompress();
            if (startOffsets[it.chunkDocs - 1] + it.lengths[it.chunkDocs - 1] != it.bytes.length) {
              throw new CorruptIndexException("Corrupted: expected chunk size="
                  + (startOffsets[it.chunkDocs - 1] + it.lengths[it.chunkDocs - 1])
                  + ", got " + it.bytes.length);
            }
            // copy non-deleted docs
            for (; docID < it.docBase + it.chunkDocs; docID = nextLiveDoc(docID + 1, liveDocs, maxDoc)) {