Package org.apache.hadoop.io.compress

Examples of org.apache.hadoop.io.compress.Compressor

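The fragments that follow are collected from open-source projects (Flume, Hadoop, HBase, Hive), each trimmed from a larger source file. Before them, here is a minimal, self-contained sketch of the basic Compressor workflow: instantiate a codec, borrow a Compressor from the CodecPool, wrap an output stream with it, and recycle the compressor afterwards. The file name is a placeholder, and DefaultCodec is chosen only because it needs no native libraries.

import java.io.FileOutputStream;
import java.io.OutputStream;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.io.compress.CodecPool;
import org.apache.hadoop.io.compress.CompressionCodec;
import org.apache.hadoop.io.compress.CompressionOutputStream;
import org.apache.hadoop.io.compress.Compressor;
import org.apache.hadoop.io.compress.DefaultCodec;
import org.apache.hadoop.util.ReflectionUtils;

public class CompressorExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // ReflectionUtils injects the Configuration into Configurable codecs;
    // a bare "new DefaultCodec()" would leave the codec unconfigured.
    CompressionCodec codec = ReflectionUtils.newInstance(DefaultCodec.class, conf);

    Compressor compressor = CodecPool.getCompressor(codec);
    try (OutputStream raw = new FileOutputStream("data" + codec.getDefaultExtension())) {
      CompressionOutputStream out = codec.createOutputStream(raw, compressor);
      out.write("hello, compressed world\n".getBytes("UTF-8"));
      out.finish(); // flushes codec state without closing the underlying stream
    } finally {
      CodecPool.returnCompressor(compressor); // always recycle pooled compressors
    }
  }
}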

      // Fragment (from a Flume HDFS sink): the opening of this deprecation
      // warning -- presumably a LOG.warn(...) call -- is truncated in the
      // original listing.
          + FlumeConfiguration.COLLECTOR_DFS_COMPRESS_GZIP
          + " is deprecated, please use "
          + FlumeConfiguration.COLLECTOR_DFS_COMPRESS_CODEC
          + " set to GzipCodec instead");
      CompressionCodec gzipC = new GzipCodec();
      Compressor gzCmp = gzipC.createCompressor();
      dstPath = new Path(path + gzipC.getDefaultExtension());
      hdfs = dstPath.getFileSystem(conf);
      writer = hdfs.create(dstPath);
      writer = gzipC.createOutputStream(writer, gzCmp);
      LOG.info("Creating HDFS gzip compressed file: " + dstPath.toString());
      return;
    }

    String codecName = conf.getCollectorDfsCompressCodec();
    List<Class<? extends CompressionCodec>> codecs = CompressionCodecFactory
        .getCodecClasses(FlumeConfiguration.get());
    CompressionCodec codec = null;
    ArrayList<String> codecStrs = new ArrayList<String>();
    codecStrs.add("None");
    for (Class<? extends CompressionCodec> cls : codecs) {
      codecStrs.add(cls.getSimpleName());

      if (cls.getSimpleName().equals(codecName)) {
        try {
          codec = cls.newInstance();
        } catch (InstantiationException e) {
          // Log the class that failed, not the still-null codec reference.
          LOG.error("Unable to instantiate " + cls + " class");
        } catch (IllegalAccessException e) {
          LOG.error("Unable to access " + cls + " class");
        }
      }
    }

    if (codec == null) {
      if (!codecName.equals("None")) {
        LOG.warn("Unsupported compression codec " + codecName
            + ".  Please choose from: " + codecStrs);
      }
      dstPath = new Path(path);
      hdfs = dstPath.getFileSystem(conf);
      writer = hdfs.create(dstPath);
      LOG.info("Creating HDFS file: " + dstPath.toString());
      return;
    }

    Compressor cmp = codec.createCompressor();
    dstPath = new Path(path + codec.getDefaultExtension());
    hdfs = dstPath.getFileSystem(conf);
    writer = hdfs.create(dstPath);
    try {
      writer = codec.createOutputStream(writer, cmp);
      // ... (remainder of this fragment truncated in the original listing)
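One note on the selection loop above: Class.newInstance() skips Hadoop's Configurable contract, so a codec that needs its Configuration (for instance, to decide between native and pure-Java implementations) comes up unconfigured. A hedged sketch of the more idiomatic instantiation, reusing the loop's cls variable (FlumeConfiguration is usable here because getCodecClasses already accepts it as a Configuration):

          // Sketch: instantiate the matched codec with its Configuration injected.
          codec = ReflectionUtils.newInstance(cls, FlumeConfiguration.get());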


    // Fragment (from a Hadoop job-history parser test): pick a codec for the
    // result path by its file extension.
    final Path resultPath = new Path(tempDir, "result.text");

    System.out.println("testHadoop20JHParser sent its output to " + resultPath);

    Compressor compressor;

    FileSystem fs = resultPath.getFileSystem(conf);
    CompressionCodec codec =
        new CompressionCodecFactory(conf).getCodec(resultPath);
    OutputStream output;
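The fragment stops before output is assigned; a hedged sketch of the conventional continuation, falling back to the raw stream when the extension matches no codec:

    // Sketch (not the verbatim test source): pick raw vs. compressed output.
    if (codec == null) {
      output = fs.create(resultPath);
    } else {
      compressor = CodecPool.getCompressor(codec);
      output = codec.createOutputStream(fs.create(resultPath), compressor);
    }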

    fs = (HFileSystem)HFileSystem.get(TEST_UTIL.getConfiguration());
  }

  // Fragment (from HBase's HFile block tests): compress a version-1 meta
  // block through the algorithm's compression stream.
  public byte[] createTestV1Block(Compression.Algorithm algo)
      throws IOException {
    Compressor compressor = algo.getCompressor();
    ByteArrayOutputStream baos = new ByteArrayOutputStream();
    OutputStream os = algo.createCompressionStream(baos, compressor, 0);
    DataOutputStream dos = new DataOutputStream(os);
    BlockType.META.write(dos); // Let's make this a meta block.
    TestHFileBlock.writeTestBlockContents(dos);

    return totalSize;
  }

  // Near-identical variant from inside TestHFileBlock itself; here the helper
  // is invoked without the class qualifier.
  public byte[] createTestV1Block(Compression.Algorithm algo)
      throws IOException {
    Compressor compressor = algo.getCompressor();
    ByteArrayOutputStream baos = new ByteArrayOutputStream();
    OutputStream os = algo.createCompressionStream(baos, compressor, 0);
    DataOutputStream dos = new DataOutputStream(os);
    BlockType.META.write(dos); // Let's make this a meta block.
    writeTestBlockContents(dos);
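Both fragments end before the stream is finished. A hedged sketch of the typical tail plus the matching read path, using HBase's Compression.Algorithm API (variable names follow the fragments, not the verbatim source):

    dos.flush();
    algo.returnCompressor(compressor);        // recycle the pooled compressor
    byte[] block = baos.toByteArray();        // the compressed v1 block bytes

    Decompressor decompressor = algo.getDecompressor();
    InputStream in = algo.createDecompressionStream(
        new ByteArrayInputStream(block), decompressor, 0);
    // ... read and verify the uncompressed block contents from `in` ...
    algo.returnDecompressor(decompressor);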

        " previously failed test.");
      }
    }

    try {
      Compressor c = algo.getCompressor();
      algo.returnCompressor(c);
      compressionTestResults[algo.ordinal()] = true; // passes
    } catch (Throwable t) {
      compressionTestResults[algo.ordinal()] = false; // failure
      throw new IOException(t);
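The same probe pattern works outside HBase. A hedged, self-contained helper in plain Hadoop terms (the method name is ours, not library API):

  // Sketch: report whether a codec can actually hand out a working Compressor,
  // e.g. whether its native library is loadable on this machine.
  public static boolean isCodecUsable(CompressionCodec codec) {
    try {
      Compressor c = CodecPool.getCompressor(codec);
      CodecPool.returnCompressor(c);
      return true;
    } catch (Throwable t) {
      return false;
    }
  }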

        " previously failed test.");
      }
    }

    try {
      Compressor c = algo.getCompressor();
      algo.returnCompressor(c);
      compressionTestResults[algo.ordinal()] = true; // passes
    } catch (Throwable t) {
      compressionTestResults[algo.ordinal()] = false; // failure
      throw new IOException(t);
View Full Code Here

        " previously failed test.");
      }
    }

    try {
      Compressor c = algo.getCompressor();
      algo.returnCompressor(c);
      compressionTestResults[algo.ordinal()] = true; // passes
    } catch (Throwable t) {
      compressionTestResults[algo.ordinal()] = false; // failure
      throw new IOException(t);
View Full Code Here

  /**
   * Get a <code>Compressor</code> for the given <code>CompressionCodec</code>
   * from the pool, or create a new one.
   *
   * @param codec the <code>CompressionCodec</code> for which to get the
   *          <code>Compressor</code>
   * @return <code>Compressor</code> for the given <code>CompressionCodec</code>
   *         from the pool or a new one
   */
  public static Compressor getCompressor(CompressionCodec codec) {
    Compressor compressor = borrow(COMPRESSOR_POOL, codec.getCompressorType());
    if (compressor == null) {
      compressor = codec.createCompressor();
      LOG.info("Got brand-new compressor");
    } else {
      LOG.debug("Got recycled compressor");
    }
    return compressor;
  }
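This pooling entry point is what most of the fragments above go through; callers are expected to hand the compressor back. A minimal usage sketch (rawStream and payload are assumed to exist):

    Compressor compressor = CodecPool.getCompressor(codec);
    try {
      CompressionOutputStream out = codec.createOutputStream(rawStream, compressor);
      out.write(payload);
      out.finish();
    } finally {
      CodecPool.returnCompressor(compressor); // recycle even if the write fails
    }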

    // Fragment (from Hive's RCFile writer): state assembled before the
    // buffered records of a row group are compressed.
    private void flushRecords() throws IOException {

      key.numberRows = bufferedRecords;

      Compressor compressor = null;
      NonSyncDataOutputBuffer valueBuffer = null;
      CompressionOutputStream deflateFilter = null;
      DataOutputStream deflateOut = null;
      boolean isCompressed = isCompressed();
      int valueLength = 0;
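These declarations feed the standard compress-into-a-buffer pattern, which lets the writer learn the compressed length before touching the real output stream. A hedged, simplified sketch (not the verbatim Hive code; serializedRows stands in for the buffered row bytes):

      Compressor compressor = CodecPool.getCompressor(codec);
      try {
        NonSyncDataOutputBuffer valueBuffer = new NonSyncDataOutputBuffer();
        CompressionOutputStream deflateFilter =
            codec.createOutputStream(valueBuffer, compressor);
        DataOutputStream deflateOut = new DataOutputStream(deflateFilter);
        deflateOut.write(serializedRows);
        deflateOut.flush();
        deflateFilter.finish();                 // codec state flushed; buffer complete
        int valueLength = valueBuffer.getLength();
      } finally {
        CodecPool.returnCompressor(compressor);
      }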

      // Fragment (from Hive's RCFile writer): the record header is written in
      // the clear, then the key portion is deflated through the codec.
      checkAndWriteSync(); // write a sync marker if the sync interval has passed
      out.writeInt(recordLen); // total record length
      out.writeInt(keyLength); // key portion length

      if (this.isCompressed()) {
        Compressor compressor = CodecPool.getCompressor(codec);
        NonSyncDataOutputBuffer compressionBuffer =
          new NonSyncDataOutputBuffer();
        CompressionOutputStream deflateFilter =
          codec.createOutputStream(compressionBuffer, compressor);
        DataOutputStream deflateOut = new DataOutputStream(deflateFilter);
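A hedged sketch of the matching read path: the fixed-size lengths come off the stream uncompressed, then the key bytes are inflated through the same codec (compressedPortion is assumed to be positioned at the deflated key bytes):

      int recordLen = in.readInt();
      int keyLength = in.readInt();
      Decompressor decompressor = CodecPool.getDecompressor(codec);
      try {
        CompressionInputStream inflateFilter =
            codec.createInputStream(compressedPortion, decompressor);
        DataInputStream inflateIn = new DataInputStream(inflateFilter);
        // ... read the key fields from inflateIn ...
      } finally {
        CodecPool.returnDecompressor(decompressor);
      }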
