
Examples of org.apache.hadoop.io.compress.DefaultCodec
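DefaultCodec is Hadoop's built-in, DEFLATE (zlib) based implementation of CompressionCodec. It is the codec Hadoop falls back to when none is configured explicitly, it implements Configurable (so it should be handed a Configuration before use), and files it writes on its own carry the .deflate extension. The excerpts below show how a DefaultCodec instance is typically passed to SequenceFile and RCFile writers.

As a warm-up, here is a minimal, self-contained sketch of using DefaultCodec directly to compress and then decompress a local file. It is not taken from any of the excerpts below; the class name and file name are illustrative only.

import java.io.FileInputStream;
import java.io.FileOutputStream;
import java.nio.charset.StandardCharsets;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.io.compress.CompressionInputStream;
import org.apache.hadoop.io.compress.CompressionOutputStream;
import org.apache.hadoop.io.compress.DefaultCodec;

public class DefaultCodecRoundTrip {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    DefaultCodec codec = new DefaultCodec();
    codec.setConf(conf); // DefaultCodec is Configurable; give it a Configuration before use

    // Compress a small payload to a local file.
    try (CompressionOutputStream out =
             codec.createOutputStream(new FileOutputStream("data.deflate"))) {
      out.write("hello, hadoop".getBytes(StandardCharsets.UTF_8));
      out.finish(); // flush the remaining compressed data
    }

    // Read the compressed file back and echo it to stdout.
    try (CompressionInputStream in =
             codec.createInputStream(new FileInputStream("data.deflate"))) {
      byte[] buf = new byte[4096];
      int n;
      while ((n = in.read(buf)) > 0) {
        System.out.write(buf, 0, n);
      }
      System.out.flush();
    }
  }
}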


  // Fragment (apparently from HBase's HLog): creates a SequenceFile.Writer for
  // write-ahead-log entries. CompressionType.NONE is requested, but this
  // createWriter overload still takes a codec, so a DefaultCodec is passed.
      Class<? extends HLogKey> keyClass, Class<? extends KeyValue> valueClass)
      throws IOException {
    return SequenceFile.createWriter(this.fs, this.conf, path, keyClass,
        valueClass, fs.getConf().getInt("io.file.buffer.size", 4096),
        fs.getDefaultReplication(), this.blocksize,
        SequenceFile.CompressionType.NONE, new DefaultCodec(), null,
        new Metadata());
  }


        // Same pattern during a log roll: open a writer on the new log path,
        // then report the old file's entry count and the new path.
        this.writer = SequenceFile.createWriter(this.fs, this.conf, newPath,
          HLogKey.class, HLogEdit.class,
          fs.getConf().getInt("io.file.buffer.size", 4096),
          fs.getDefaultReplication(), this.blocksize,
          SequenceFile.CompressionType.NONE, new DefaultCodec(), null,
          new Metadata());

        LOG.info((oldFile != null?
          "Closed " + oldFile + ", entries=" + this.numEntries + ". ": "") +
          "New log writer: " + FSUtils.getPath(newPath));
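Both HLog fragments above request CompressionType.NONE and pass a DefaultCodec only because the createWriter overload they use demands a codec argument. A minimal sketch of actually compressing a SequenceFile with DefaultCodec might look like the following; the path and the Text/IntWritable key and value types are illustrative, not taken from the code above.

    Configuration conf = new Configuration();
    FileSystem fs = FileSystem.get(conf);
    Path path = new Path("/tmp/example.seq"); // illustrative path

    // Record-compressed SequenceFile: each value is compressed with DefaultCodec.
    SequenceFile.Writer writer = SequenceFile.createWriter(fs, conf, path,
        Text.class, IntWritable.class,
        SequenceFile.CompressionType.RECORD, new DefaultCodec());
    try {
      writer.append(new Text("row-1"), new IntWritable(42));
    } finally {
      writer.close();
    }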

        + useRawLocalFileSystem);
  }

  // Excerpt (apparently from a Flume HDFS sink writer): the codec-less open()
  // defaults to a block-compressed file written with DefaultCodec.
  @Override
  public void open(String filePath) throws IOException {
    DefaultCodec defCodec = new DefaultCodec();
    CompressionType cType = CompressionType.BLOCK;
    open(filePath, defCodec, cType);
  }
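One detail worth noting, whichever writer creates the file: the codec chosen at write time is recorded in the SequenceFile header, so readers never need to be told about DefaultCodec. A small sketch, reusing the fs, conf, and path variables from the earlier sketch:

    SequenceFile.Reader reader = new SequenceFile.Reader(fs, path, conf);
    try {
      Text key = new Text();
      IntWritable value = new IntWritable();
      while (reader.next(key, value)) {
        System.out.println(key + "\t" + value); // decompression happens transparently
      }
    } finally {
      reader.close();
    }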

                                // Truncated call (arguments only): creating a
                                // block-compressed SequenceFile of IntWritable
                                // keys and VectorWritable values with DefaultCodec.
                                conf,
                                aLocPath,
                                IntWritable.class,
                                VectorWritable.class,
                                CompressionType.BLOCK,
                                new DefaultCodec());
    closeables.addFirst(w);

    int n = 100;
    int m = 2000;
    double percent = 5;

          } catch (ClassNotFoundException cnfe) {
            throw new IllegalArgumentException("Unknown codec: " +
                                               codecClassname, cnfe);
          }
        } else {
          // No codec class configured: fall back to DefaultCodec and hand it
          // the Configuration explicitly, since it implements Configurable.
          codec = new DefaultCodec();
          ((Configurable) codec).setConf(conf);
        }
      }
      }
     
      this.metadata = new Metadata();
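The fragment above shows the common fallback pattern: instantiate the codec class named in the configuration, and use DefaultCodec when nothing is configured. Here is a sketch of the same idea using ReflectionUtils, which also takes care of calling setConf on Configurable codecs; the configuration key name is hypothetical.

    String codecClassname = conf.get("example.compression.codec.class"); // hypothetical key
    CompressionCodec codec;
    if (codecClassname != null) {
      try {
        Class<?> codecClass = conf.getClassByName(codecClassname);
        // ReflectionUtils.newInstance calls setConf for Configurable implementations.
        codec = (CompressionCodec) ReflectionUtils.newInstance(codecClass, conf);
      } catch (ClassNotFoundException cnfe) {
        throw new IllegalArgumentException("Unknown codec: " + codecClassname, cnfe);
      }
    } else {
      codec = new DefaultCodec();
      ((Configurable) codec).setConf(conf);
    }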

   
    // Apparently from SequenceFile itself: pick a writer implementation for the
    // requested compression type; RECORD and BLOCK both default to DefaultCodec
    // when the caller supplies no codec.
    if (compressionType == CompressionType.NONE) {
      writer = new Writer(fs, conf, name, keyClass, valClass, null, new Metadata());
    } else if (compressionType == CompressionType.RECORD) {
      writer = new RecordCompressWriter(fs, conf, name, keyClass, valClass,
          new DefaultCodec());
    } else if (compressionType == CompressionType.BLOCK){
      writer = new BlockCompressWriter(fs, conf, name, keyClass, valClass,
          new DefaultCodec());
    }
   
    return writer;
  }

   
    // Same dispatch as above, but threading a Progressable and Metadata
    // through to the compressed writers.
    if (compressionType == CompressionType.NONE) {
      writer = new Writer(fs, conf, name, keyClass, valClass, progress, new Metadata());
    } else if (compressionType == CompressionType.RECORD) {
      writer = new RecordCompressWriter(fs, conf, name,
          keyClass, valClass, new DefaultCodec(), progress, new Metadata());
    } else if (compressionType == CompressionType.BLOCK){
      writer = new BlockCompressWriter(fs, conf, name,
          keyClass, valClass, new DefaultCodec(), progress, new Metadata());
    }
   
    return writer;
  }
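Both dispatch blocks above belong to the older FileSystem-based createWriter overloads. On Hadoop 2.x and later the same choice is usually expressed through the options-based overload; a brief sketch, with an illustrative path:

    SequenceFile.Writer w = SequenceFile.createWriter(conf,
        SequenceFile.Writer.file(new Path("/tmp/block-compressed.seq")),
        SequenceFile.Writer.keyClass(Text.class),
        SequenceFile.Writer.valueClass(IntWritable.class),
        SequenceFile.Writer.compression(SequenceFile.CompressionType.BLOCK,
            new DefaultCodec()));
    w.append(new Text("row-1"), new IntWritable(42));
    w.close();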

        "5.3".getBytes("UTF-8"), "hive and hadoop".getBytes("UTF-8"),
        new byte[0], "NULL".getBytes("UTF-8") };

    // Hive RCFile test: declare the column count, then write one row whose
    // columns are raw byte arrays, compressed with DefaultCodec.
    RCFileOutputFormat.setColumnNumber(conf, expectedFieldsData.length);
    RCFile.Writer writer = new RCFile.Writer(fs, conf, file, null,
        new DefaultCodec());
    BytesRefArrayWritable bytes = new BytesRefArrayWritable(record_1.length);
    for (int i = 0; i < record_1.length; i++) {
      BytesRefWritable cu = new BytesRefWritable(record_1[i], 0,
          record_1[i].length);
      bytes.set(i, cu);

      byte[][] fieldsData) throws IOException, SerDeException {
    fs.delete(file, true);

    RCFileOutputFormat.setColumnNumber(conf, fieldsData.length);
    RCFile.Writer writer = new RCFile.Writer(fs, conf, file, null,
        new DefaultCodec());

    BytesRefArrayWritable bytes = new BytesRefArrayWritable(fieldsData.length);
    for (int i = 0; i < fieldsData.length; i++) {
      BytesRefWritable cu = new BytesRefWritable(fieldsData[i], 0, fieldsData[i].length);
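Condensing the two RCFile fragments above into one piece: declare the column count, then append a row whose columns are raw byte arrays, letting DefaultCodec compress the column data. This sketch reuses the conf, fs, and file variables from the fragments; the column values are illustrative.

    byte[][] columns = {
        "1".getBytes(StandardCharsets.UTF_8),
        "hive and hadoop".getBytes(StandardCharsets.UTF_8),
    };

    RCFileOutputFormat.setColumnNumber(conf, columns.length);
    RCFile.Writer rcWriter = new RCFile.Writer(fs, conf, file, null, new DefaultCodec());

    BytesRefArrayWritable row = new BytesRefArrayWritable(columns.length);
    for (int i = 0; i < columns.length; i++) {
      row.set(i, new BytesRefWritable(columns[i], 0, columns[i].length));
    }
    rcWriter.append(row);
    rcWriter.close();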

      // Hive benchmark harness comparing RCFile and SequenceFile; DefaultCodec
      // is the compression codec handed to each test run.
      PerformTestRCFileAndSeqFile testcase = new PerformTestRCFileAndSeqFile(
          isLocalFS, file);

      // change these parameters
      boolean checkCorrect = true;
      CompressionCodec codec = new DefaultCodec();
      testcase.columnMaxSize = 30;

      // testcase.testWithColumnNumber(count, 2, checkCorrect, codec);
      // testcase.testWithColumnNumber(count, 10, checkCorrect, codec);
      // testcase.testWithColumnNumber(count, 25, checkCorrect, codec);
