Package org.apache.hadoop.io.compress

Examples of org.apache.hadoop.io.compress.DefaultCodec
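
DefaultCodec is Hadoop's built-in CompressionCodec backed by zlib/DEFLATE; its output carries the ".deflate" extension. The snippets below all follow the same pattern: passing a new DefaultCodec() instance into a file writer (SequenceFile, MapFile, RCFile, HBase WAL). The codec can also wrap raw streams directly; a minimal sketch (the path is a placeholder, error handling omitted):

    Configuration conf = new Configuration();
    FileSystem fs = FileSystem.get(conf);
    DefaultCodec codec = new DefaultCodec();
    codec.setConf(conf); // DefaultCodec is Configurable; give it a conf before use

    // Compress bytes onto a stream.
    Path p = new Path("/tmp/example.deflate");
    CompressionOutputStream out = codec.createOutputStream(fs.create(p));
    out.write("hello, deflate".getBytes(StandardCharsets.UTF_8));
    out.close();

    // Decompress them again; the same codec produces the input stream.
    CompressionInputStream in = codec.createInputStream(fs.open(p));
    // ... read decompressed bytes from 'in' ...
    in.close();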


        // Accumulo log recovery: copy a local write-ahead log into HDFS as a
        // block-compressed SequenceFile, compressing with DefaultCodec.
        Writer writer = null;
        Reader reader = null;
        try {
          short replication = (short) acuConf.getCount(Property.LOGGER_RECOVERY_FILE_REPLICATION);
          writer = SequenceFile.createWriter(fs, fs.getConf(), dest, LogFileKey.class, LogFileValue.class,
              fs.getConf().getInt("io.file.buffer.size", 4096), replication, fs.getDefaultBlockSize(),
              SequenceFile.CompressionType.BLOCK, new DefaultCodec(), null, new Metadata());
          FileSystem local = TraceFileSystem.wrap(FileSystem.getLocal(fs.getConf()).getRaw());
          reader = new SequenceFile.Reader(local, new Path(findLocalFilename(localLog)), fs.getConf());
          while (reader.next(key, value)) {
            writer.append(key, value);
          }
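
This Accumulo log-recovery excerpt streams entries from a local write-ahead log into the block-compressed file on HDFS. Reading it back needs no codec argument, because SequenceFile.Reader recovers the codec from the file header; a sketch using the same dest and key/value classes:

          // The codec is discovered from the SequenceFile header, so the
          // reader never names DefaultCodec explicitly.
          SequenceFile.Reader r = new SequenceFile.Reader(fs, dest, fs.getConf());
          LogFileKey k = new LogFileKey();
          LogFileValue v = new LogFileValue();
          while (r.next(k, v)) {
            // process one recovered log entry
          }
          r.close();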


   * @return the handle to the constructed MySequenceFile Writer.
   */
  public static Writer createWriter(FileSystem fs, Configuration conf, Path name, Class keyClass, Class valClass, CompressionType compressionType)
      throws IOException {
    return createWriter(fs, conf, name, keyClass, valClass, fs.getConf().getInt("io.file.buffer.size", 4096), fs.getDefaultReplication(),
        fs.getDefaultBlockSize(), compressionType, new DefaultCodec(), null, new Metadata());
  }

   * @return the handle to the constructed MySequenceFile Writer.
   */
  public static Writer createWriter(FileSystem fs, Configuration conf, Path name, Class keyClass, Class valClass, CompressionType compressionType,
      Progressable progress) throws IOException {
    return createWriter(fs, conf, name, keyClass, valClass, fs.getConf().getInt("io.file.buffer.size", 4096), fs.getDefaultReplication(),
        fs.getDefaultBlockSize(), compressionType, new DefaultCodec(), progress, new Metadata());
  }
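
Both MySequenceFile overloads above supply the same defaults: the configured io.file.buffer.size (falling back to 4096), the filesystem's default replication and block size, an empty Metadata, and a new DefaultCodec(). A caller wanting a different codec would invoke the long-form overload directly; a hypothetical sketch, with GzipCodec standing in for any other CompressionCodec:

    Writer w = createWriter(fs, conf, name, keyClass, valClass,
        fs.getConf().getInt("io.file.buffer.size", 4096), fs.getDefaultReplication(),
        fs.getDefaultBlockSize(), compressionType, new GzipCodec(),
        null, new Metadata());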

    }
   
    /** Create the named map using the named key comparator. */
    public Writer(Configuration conf, FileSystem fs, String dirName, WritableComparator comparator, Class valClass, MySequenceFile.CompressionType compress,
        Progressable progress) throws IOException {
      this(conf, fs, dirName, comparator, valClass, compress, new DefaultCodec(), progress);
    }

            this.codec = ReflectionUtils.newInstance(codecClass, conf);
          } catch (ClassNotFoundException cnfe) {
            throw new IllegalArgumentException("Unknown codec: " + codecClassname, cnfe);
          }
        } else {
          // No codec class configured: fall back to DefaultCodec, which
          // implements Configurable and so needs the configuration injected.
          codec = new DefaultCodec();
          ((Configurable) codec).setConf(conf);
        }
      }
     
      this.metadata = new Metadata();
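
The fallback branch instantiates DefaultCodec by hand and casts to Configurable to inject the configuration. ReflectionUtils.newInstance collapses both steps, since it calls setConf on any Configurable it constructs; an equivalent sketch:

          // ReflectionUtils.newInstance invokes setConf() automatically when
          // the instantiated class implements Configurable.
          codec = ReflectionUtils.newInstance(DefaultCodec.class, conf);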

            numFields = 10;
        }

        // RCFile needs the column count recorded in the configuration
        // before the writer is constructed.
        RCFileOutputFormat.setColumnNumber(conf, numFields);
        RCFile.Writer writer = new RCFile.Writer(fs, conf, getFile(output),
                null, new DefaultCodec());

        PrintWriter pw = new PrintWriter(new FileWriter(plainOutput));

        for (int j = 0; j < numRows; j++) {
            BytesRefArrayWritable row = new BytesRefArrayWritable(numFields);
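
The elided loop body fills the BytesRefArrayWritable with one BytesRefWritable per column and appends the row; a sketch with hypothetical cell contents:

            // Hypothetical cell values; each column is one BytesRefWritable.
            for (int i = 0; i < numFields; i++) {
                byte[] cell = ("row-" + j + "-col-" + i).getBytes(StandardCharsets.UTF_8);
                row.set(i, new BytesRefWritable(cell, 0, cell.length));
            }
            writer.append(row);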

              conf.getInt("hbase.regionserver.hlog.replication",
              FSUtils.getDefaultReplication(fs, path))),
            Long.valueOf(conf.getLong("hbase.regionserver.hlog.blocksize",
                FSUtils.getDefaultBlockSize(fs, path))),
            Boolean.valueOf(false) /*createParent*/,
            SequenceFile.CompressionType.NONE, new DefaultCodec(),
            createMetadata(conf, compress)
            });
    } catch (InvocationTargetException ite) {
      // The method was invoked successfully but threw its own exception.
      throw new IOException(ite.getCause());
    } catch (Exception e) {
      // Ignore all other exceptions; they indicate the reflective lookup failed.
    }

    // if reflection failed, use the old createWriter
    if (this.writer == null) {
      LOG.debug("new createWriter -- HADOOP-6840 -- not available");
      this.writer = SequenceFile.createWriter(fs, conf, path,
        HLogKey.class, WALEdit.class,
        FSUtils.getDefaultBufferSize(fs),
        (short) conf.getInt("hbase.regionserver.hlog.replication",
          FSUtils.getDefaultReplication(fs, path)),
        conf.getLong("hbase.regionserver.hlog.blocksize",
          FSUtils.getDefaultBlockSize(fs, path)),
        SequenceFile.CompressionType.NONE,
        new DefaultCodec(),
        null,
        createMetadata(conf, compress));
    } else {
      if (LOG.isTraceEnabled()) LOG.trace("Using new createWriter -- HADOOP-6840");
    }
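
The reflective args array above targets the SequenceFile.createWriter overload introduced by HADOOP-6840, which added a createParent flag (non-recursive create). A sketch of the lookup this code is assumed to perform, with the parameter list inferred from the boxed arguments above:

      // Assumed HADOOP-6840 overload, inferred from the argument list:
      Method m = SequenceFile.class.getMethod("createWriter",
          FileSystem.class, Configuration.class, Path.class,
          Class.class, Class.class, int.class, short.class, long.class,
          boolean.class /* createParent */,
          SequenceFile.CompressionType.class, CompressionCodec.class,
          SequenceFile.Metadata.class);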

              conf.getInt("hbase.regionserver.hlog.replication",
              fs.getDefaultReplication())),
            Long.valueOf(conf.getLong("hbase.regionserver.hlog.blocksize",
                fs.getDefaultBlockSize())),
            Boolean.valueOf(false) /*createParent*/,
            SequenceFile.CompressionType.NONE, new DefaultCodec(),
            new Metadata()
            });
    } catch (InvocationTargetException ite) {
      // The method was invoked successfully but threw its own exception.
      throw new IOException(ite.getCause());
    } catch (Exception e) {
      // Ignore all other exceptions; they indicate the reflective lookup failed.
    }

    // if reflection failed, use the old createWriter
    if (this.writer == null) {
      LOG.debug("new createWriter -- HADOOP-6840 -- not available");
      this.writer = SequenceFile.createWriter(fs, conf, path,
        HLog.getKeyClass(conf), WALEdit.class,
        fs.getConf().getInt("io.file.buffer.size", 4096),
        (short) conf.getInt("hbase.regionserver.hlog.replication",
          fs.getDefaultReplication()),
        conf.getLong("hbase.regionserver.hlog.blocksize",
          fs.getDefaultBlockSize()),
        SequenceFile.CompressionType.NONE,
        new DefaultCodec(),
        null,
        new Metadata());
    } else {
      LOG.debug("using new createWriter -- HADOOP-6840");
    }
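
This older variant of the same HADOOP-6840 fallback differs only in the defaults it consults: fs.getDefaultReplication() and fs.getDefaultBlockSize() without a path argument, the key class resolved through HLog.getKeyClass(conf) rather than a hard-coded HLogKey.class, and a plain new Metadata() instead of createMetadata(conf, compress).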

      Class<? extends HLogKey> keyClass, Class<? extends KeyValue> valueClass)
      throws IOException {
    return SequenceFile.createWriter(this.fs, this.conf, path, keyClass,
        valueClass, fs.getConf().getInt("io.file.buffer.size", 4096),
        fs.getDefaultReplication(), this.blocksize,
        SequenceFile.CompressionType.NONE, new DefaultCodec(), null,
        new Metadata());
  }
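
Note that even with CompressionType.NONE this createWriter overload still requires a codec argument; the DefaultCodec instance is effectively a placeholder here and is not used to compress the WAL records.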