Package org.apache.hadoop.mapred

Examples of org.apache.hadoop.mapred.RecordWriter
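Every example below follows the same pattern: an OutputFormat whose getRecordWriter returns an (often anonymous) RecordWriter with exactly two methods, write(key, value) and close(reporter). For orientation, here is a minimal self-contained sketch of that pattern against the old org.apache.hadoop.mapred API; the class name LineOutputFormat and its tab-separated format are invented for illustration:

import java.io.DataOutputStream;
import java.io.IOException;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.Writable;
import org.apache.hadoop.io.WritableComparable;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.OutputFormat;
import org.apache.hadoop.mapred.RecordWriter;
import org.apache.hadoop.mapred.Reporter;
import org.apache.hadoop.util.Progressable;

// Hypothetical OutputFormat that writes each pair as a tab-separated text line.
public class LineOutputFormat implements OutputFormat<WritableComparable, Writable> {
  public RecordWriter<WritableComparable, Writable> getRecordWriter(
      FileSystem fs, JobConf job, String name, Progressable progress)
      throws IOException {
    final DataOutputStream out = fs.create(new Path(name), progress);
    return new RecordWriter<WritableComparable, Writable>() {
      public void write(WritableComparable key, Writable value) throws IOException {
        out.writeBytes(key + "\t" + value + "\n");
      }
      public void close(Reporter reporter) throws IOException {
        out.close(); // flush and release the underlying stream
      }
    };
  }
  public void checkOutputSpecs(FileSystem ignored, JobConf job) {
    // no validation in this sketch
  }
}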


    ObjectInspector inspector =
        ObjectInspectorFactory.getReflectionObjectInspector(NestedRow.class,
            ObjectInspectorFactory.ObjectInspectorOptions.JAVA);
    SerDe serde = new OrcSerde();
    OutputFormat<?, ?> outFormat = new OrcOutputFormat();
    RecordWriter writer =
        outFormat.getRecordWriter(fs, conf, testFilePath.toString(),
            Reporter.NULL);
    // The ORC writer ignores the key, so NullWritable serves as a placeholder;
    // the SerDe turns each Java object into a Writable the writer understands.
    writer.write(NullWritable.get(),
        serde.serialize(new NestedRow(1, 2, 3), inspector));
    writer.write(NullWritable.get(),
        serde.serialize(new NestedRow(4, 5, 6), inspector));
    writer.write(NullWritable.get(),
        serde.serialize(new NestedRow(7, 8, 9), inspector));
    writer.close(Reporter.NULL);
    // Re-initialize the SerDe with the on-disk schema before reading back.
    serde = new OrcSerde();
    properties.setProperty("columns", "z,r");
    properties.setProperty("columns.types", "int:struct<x:int,y:int>");
    serde.initialize(conf, properties);
    inspector = (StructObjectInspector) serde.getObjectInspector();


    ObjectInspector inspector =
        ObjectInspectorFactory.getReflectionObjectInspector(StringRow.class,
            ObjectInspectorFactory.ObjectInspectorOptions.JAVA);
    SerDe serde = new OrcSerde();
    OutputFormat<?, ?> outFormat = new OrcOutputFormat();
    RecordWriter writer =
        outFormat.getRecordWriter(fs, job, testFilePath.getName(),
            Reporter.NULL);
    writer.write(NullWritable.get(),
        serde.serialize(new StringRow("a"), inspector));
    writer.close(Reporter.NULL);
    // Re-initialize the SerDe with the single string column before reading back.
    serde = new OrcSerde();
    properties.setProperty("columns", "col");
    properties.setProperty("columns.types", "string");
    serde.initialize(conf, properties);
    inspector = (StructObjectInspector) serde.getObjectInspector();
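The two ORC excerpts above are the write half of a round trip. A sketch of the matching read side, assuming the same conf, testFilePath, and re-initialized serde as above (written in the spirit of the Hive tests, not as a verbatim continuation):

    // Read the rows back through the matching ORC InputFormat.
    InputFormat<?, ?> inFormat = new OrcInputFormat();
    FileInputFormat.setInputPaths(conf, testFilePath.toString());
    InputSplit[] splits = inFormat.getSplits(conf, 1);
    RecordReader reader = inFormat.getRecordReader(splits[0], conf, Reporter.NULL);
    StructObjectInspector rowInspector =
        (StructObjectInspector) serde.getObjectInspector();
    Object key = reader.createKey();
    Object value = reader.createValue();
    while (reader.next(key, value)) {
      // Decompose each row into its column values via the inspector.
      System.out.println(rowInspector.getStructFieldsDataAsList(value));
    }
    reader.close();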

/**
 * Consume all outputs and put them in /dev/null.
 */
public class NullOutputFormat implements OutputFormat {
  public RecordWriter getRecordWriter(FileSystem ignored, JobConf job,
                                      String name, Progressable progress) {
    return new RecordWriter() {
        public void write(WritableComparable key, Writable value) { }
        public void close(Reporter reporter) { }
      };
  }

  // No-op output validation completes the OutputFormat contract.
  public void checkOutputSpecs(FileSystem ignored, JobConf job) { }
}
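NullOutputFormat is handy when a job's real work happens through side effects (counters, external stores) but the framework still requires an output format. A hypothetical driver line (MyJob is a placeholder class):

    JobConf job = new JobConf(MyJob.class); // placeholder driver class
    job.setOutputFormat(NullOutputFormat.class);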

      final MapFile.Writer dataOut = new MapFile.Writer(job, fs,
        data.toString(), Text.class, ParseData.class);

      final SequenceFile.Writer crawlOut = SequenceFile.createWriter(fs,
        job, crawl, Text.class, CrawlDatum.class);

      return new RecordWriter()
      {
        public void write(WritableComparable key, Writable value)
          throws IOException
        {
          // Test that I can parse the key before I do anything

  public static class SegmentOutputFormat extends OutputFormatBase {
    private static final String DEFAULT_SLICE = "default";
   
    @Override
    public RecordWriter getRecordWriter(final FileSystem fs, final JobConf job, final String name, final Progressable progress) throws IOException {
      return new RecordWriter() {
        MapFile.Writer c_out = null;
        MapFile.Writer f_out = null;
        MapFile.Writer pd_out = null;
        MapFile.Writer pt_out = null;
        SequenceFile.Writer g_out = null;

      // Get the old copy out of the way
      if (fs.exists(segmentDumpFile)) fs.delete(segmentDumpFile);

      final PrintStream printStream = new PrintStream(fs.create(segmentDumpFile));
      return new RecordWriter() {
        // write() is synchronized because several threads may share this writer.
        public synchronized void write(WritableComparable key, Writable value) throws IOException {
          ObjectWritable writable = (ObjectWritable) value;
          printStream.println((String) writable.get());
        }

      Path cdx = new Path(new Path(job.getOutputPath(), "cdx"), name);
      final SequenceFile.Writer cdxOut = SequenceFile.createWriter(fs,
        job, cdx, Text.class, Text.class,
        SequenceFile.CompressionType.NONE);

      return new RecordWriter()
      {
        private RecordWriter parseOut;

        // Instance initializer: runs when the anonymous writer is constructed.
        {

  // Synchronized so the cached writers can be shared safely across threads,
  // e.g. when run under a MultithreadedMapRunner.
  private synchronized RecordWriter getRecordWriter(String namedOutput,
                                                    String baseFileName,
                                                    final Reporter reporter, Schema schema)
    throws IOException {
    RecordWriter writer = recordWriters.get(baseFileName);
    if (writer == null) {
      if (countersEnabled && reporter == null) {
        throw new IllegalArgumentException(
          "Counters are enabled, Reporter cannot be NULL");
      }

    if (multi) {
      checkTokenName(multiName);
    }

    String baseFileName = (multi) ? namedOutput + "_" + multiName : baseOutputFileName;

    final RecordWriter writer =
      getRecordWriter(namedOutput, baseFileName, reporter, schema);

    return new AvroCollector() {

      @SuppressWarnings({"unchecked"})
      public void collect(Object key) throws IOException {
        AvroWrapper wrapper = new AvroWrapper(key);
        writer.write(wrapper, NullWritable.get());
      }

      public void collect(Object key, Object value) throws IOException {
        writer.write(key, value);
      }
    };
  }

  // Synchronized so the cached writers can be shared safely across threads,
  // e.g. when run under a MultithreadedMapRunner.
  private synchronized RecordWriter getRecordWriter(String namedOutput,
                                                    String baseFileName,
                                                    final Reporter reporter)
    throws IOException {
    RecordWriter writer = recordWriters.get(baseFileName);
    if (writer == null) {
      if (countersEnabled && reporter == null) {
        throw new IllegalArgumentException(
          "Counters are enabled, Reporter cannot be NULL");
      }
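Both overloads above implement the same lazy-cache idiom: look up the writer by base filename and create it only on first use, so each named output gets exactly one file per task. A sketch of the surrounding pieces, with hypothetical field names and assuming java.util.Map/HashMap imports (the real MultipleOutputs/AvroMultipleOutputs classes differ in detail):

  // One cached RecordWriter per base output filename.
  private final Map<String, RecordWriter> recordWriters =
      new HashMap<String, RecordWriter>();

  // Called once at the end of the task to flush and close every named output.
  public void close() throws IOException {
    for (RecordWriter writer : recordWriters.values()) {
      writer.close(null); // most writers ignore the Reporter argument
    }
    recordWriters.clear();
  }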
