Package org.apache.hadoop.io.compress

Examples of org.apache.hadoop.io.compress.Compressor
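A Compressor is the per-stream compression engine behind a CompressionCodec. The snippets below all follow the same basic pattern: obtain a Compressor (directly from the codec/algorithm or from CodecPool), wrap an OutputStream in a compression stream with it, write the data, and hand the Compressor back. A minimal, self-contained sketch of that pattern, assuming GzipCodec and an illustrative class name not taken from the snippets below:

import java.io.IOException;
import java.io.OutputStream;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.io.compress.CodecPool;
import org.apache.hadoop.io.compress.CompressionCodec;
import org.apache.hadoop.io.compress.CompressionOutputStream;
import org.apache.hadoop.io.compress.Compressor;
import org.apache.hadoop.io.compress.GzipCodec;
import org.apache.hadoop.util.ReflectionUtils;

public class CompressorUsageSketch {
  // Compress 'data' onto 'out' with a pooled Compressor, returning it afterwards.
  public static void writeCompressed(byte[] data, OutputStream out, Configuration conf)
      throws IOException {
    CompressionCodec codec = ReflectionUtils.newInstance(GzipCodec.class, conf);
    Compressor compressor = CodecPool.getCompressor(codec); // borrow from the pool
    try {
      CompressionOutputStream cos = codec.createOutputStream(out, compressor);
      cos.write(data);
      cos.finish(); // flush remaining compressed output without closing 'out'
    } finally {
      CodecPool.returnCompressor(compressor); // always return the pooled Compressor
    }
  }
}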


  public byte[] createTestV1Block(Compression.Algorithm algo)
      throws IOException {
    // Borrow a Compressor for the algorithm and write the block contents
    // through a compressing stream into an in-memory buffer.
    Compressor compressor = algo.getCompressor();
    ByteArrayOutputStream baos = new ByteArrayOutputStream();
    OutputStream os = algo.createCompressionStream(baos, compressor, 0);
    DataOutputStream dos = new DataOutputStream(os);
    BlockType.META.write(dos); // Let's make this a meta block.
    writeTestBlockContents(dos);


  public byte[] createTestV1Block(Compression.Algorithm algo)
      throws IOException {
    // Same pattern as above; the only difference is that the block contents
    // are written via the static helper on TestHFileBlock.
    Compressor compressor = algo.getCompressor();
    ByteArrayOutputStream baos = new ByteArrayOutputStream();
    OutputStream os = algo.createCompressionStream(baos, compressor, 0);
    DataOutputStream dos = new DataOutputStream(os);
    BlockType.META.write(dos); // Let's make this a meta block.
    TestHFileBlock.writeTestBlockContents(dos);


    // within the estimated buffer size, then the cost is not worth it.  If we are often well
    // outside the guesstimated buffer size, the processing can be done in half the time if we
    // go w/ the estimated size rather than let the buffer resize.
    ByteBufferOutputStream baos = new ByteBufferOutputStream(bufferSize);
    OutputStream os = baos;
    Compressor poolCompressor = null;
    try {
      // Note: despite its name, 'compressor' here is a CompressionCodec; the pooled
      // Compressor instance borrowed from CodecPool is 'poolCompressor'.
      if (compressor != null) {
        if (compressor instanceof Configurable) ((Configurable) compressor).setConf(this.conf);
        poolCompressor = CodecPool.getCompressor(compressor);
        os = compressor.createOutputStream(os, poolCompressor);
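The excerpt stops inside the try block. In this borrow-from-the-pool pattern the method ends by returning the pooled Compressor in a finally block; roughly, as an assumed continuation rather than a copy of the full source:

      }
      // ... the payload is written to os and the stream flushed ...
    } finally {
      if (poolCompressor != null) {
        CodecPool.returnCompressor(poolCompressor); // always hand the pooled Compressor back
      }
    }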


    private void flushRecords() throws IOException {

      key.numberRows = bufferedRecords;

      // Compression machinery for the value portion, set up only when the
      // file is written compressed.
      Compressor compressor = null;
      NonSyncDataOutputBuffer valueBuffer = null;
      CompressionOutputStream deflateFilter = null;
      DataOutputStream deflateOut = null;
      boolean isCompressed = isCompressed();
      int valueLength = 0;

      checkAndWriteSync(); // sync
      out.writeInt(recordLen); // total record length
      out.writeInt(keyLength); // key portion length

      if (this.isCompressed()) {
        // Borrow a Compressor from the pool and set up a compressed stream
        // that writes into an in-memory buffer.
        Compressor compressor = CodecPool.getCompressor(codec);
        NonSyncDataOutputBuffer compressionBuffer =
          new NonSyncDataOutputBuffer();
        CompressionOutputStream deflateFilter =
          codec.createOutputStream(compressionBuffer, compressor);
        DataOutputStream deflateOut = new DataOutputStream(deflateFilter);
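The excerpt ends right after the compressed stream is set up. A self-contained sketch of the same idea, compressing into an in-memory buffer so the compressed length is known before anything is written to the record stream; it uses the stock DataOutputBuffer and DefaultCodec instead of the snippet's NonSyncDataOutputBuffer and codec field, and all names here are illustrative:

import java.io.DataOutputStream;
import java.io.IOException;
import java.util.Arrays;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.io.DataOutputBuffer;
import org.apache.hadoop.io.compress.CodecPool;
import org.apache.hadoop.io.compress.CompressionCodec;
import org.apache.hadoop.io.compress.CompressionOutputStream;
import org.apache.hadoop.io.compress.Compressor;
import org.apache.hadoop.io.compress.DefaultCodec;
import org.apache.hadoop.util.ReflectionUtils;

public class CompressIntoBuffer {
  // Compress 'raw' into an in-memory buffer so its compressed length can be
  // written (e.g. as a key/value portion length) before the bytes themselves.
  static byte[] compress(byte[] raw, Configuration conf) throws IOException {
    CompressionCodec codec = ReflectionUtils.newInstance(DefaultCodec.class, conf);
    Compressor compressor = CodecPool.getCompressor(codec);
    try {
      DataOutputBuffer compressionBuffer = new DataOutputBuffer();
      CompressionOutputStream deflateFilter =
          codec.createOutputStream(compressionBuffer, compressor);
      DataOutputStream deflateOut = new DataOutputStream(deflateFilter);
      deflateOut.write(raw);
      deflateOut.flush();
      deflateFilter.finish(); // complete the compressed stream
      return Arrays.copyOf(compressionBuffer.getData(), compressionBuffer.getLength());
    } finally {
      CodecPool.returnCompressor(compressor); // hand the Compressor back to the pool
    }
  }
}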

   * @param conf the <code>Configuration</code> object which contains confs for creating or reinit the compressor
   * @return <code>Compressor</code> for the given <code>CompressionCodec</code>
   *         from the pool or a new one
   */
  public static Compressor getCompressor(CompressionCodec codec, Configuration conf) {
    Compressor compressor = borrow(COMPRESSOR_POOL, codec.getCompressorType());
    if (compressor == null) {
      compressor = codec.createCompressor();
      LOG.info("Got brand-new compressor ["+codec.getDefaultExtension()+"]");
    } else {
      compressor.reinit(conf);
      if(LOG.isDebugEnabled()) {
        LOG.debug("Got recycled compressor");
      }
    }
    return compressor;
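A short sketch of the pool round trip that getCompressor(codec, conf) implements: the first call creates a brand-new Compressor, and once it has been returned, a later call hands back a recycled instance re-initialized with the supplied Configuration. The class name and the DefaultCodec choice are illustrative assumptions:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.io.compress.CodecPool;
import org.apache.hadoop.io.compress.Compressor;
import org.apache.hadoop.io.compress.DefaultCodec;
import org.apache.hadoop.util.ReflectionUtils;

public class CodecPoolRoundTrip {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    DefaultCodec codec = ReflectionUtils.newInstance(DefaultCodec.class, conf);

    Compressor first = CodecPool.getCompressor(codec, conf);  // pool empty: brand-new compressor
    CodecPool.returnCompressor(first);                        // give it back to the pool

    Compressor second = CodecPool.getCompressor(codec, conf); // recycled and reinit(conf)'d
    System.out.println("same instance recycled: " + (first == second));
    CodecPool.returnCompressor(second);
  }
}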


