Package org.apache.hadoop.hive.ql.io

Examples of org.apache.hadoop.hive.ql.io.FSRecordWriter
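
A quick orientation before the snippets: FSRecordWriter is the record writer that Hive output formats hand back from getHiveRecordWriter. Rows arrive already serialized as a Writable via write(Writable), and close(boolean abort) tells the writer whether the task is committing or aborting. Below is a minimal sketch of an implementation, assuming (as the anonymous classes in the examples suggest) that FSRecordWriter is an interface with exactly these two methods; the write-Text-lines behavior is illustrative, not any particular Hive output format:

  import java.io.IOException;
  import java.io.OutputStream;

  import org.apache.hadoop.fs.FileSystem;
  import org.apache.hadoop.fs.Path;
  import org.apache.hadoop.hive.ql.io.FSRecordWriter;
  import org.apache.hadoop.io.Text;
  import org.apache.hadoop.io.Writable;
  import org.apache.hadoop.mapred.JobConf;

  public class TextLineRecordWriter implements FSRecordWriter {

    private final OutputStream out;

    public TextLineRecordWriter(JobConf jc, Path path) throws IOException {
      FileSystem fs = path.getFileSystem(jc);
      this.out = fs.create(path);
    }

    @Override
    public void write(Writable w) throws IOException {
      // Illustrative: append each Text row as one line.
      Text t = (Text) w;
      out.write(t.getBytes(), 0, t.getLength());
      out.write('\n');
    }

    @Override
    public void close(boolean abort) throws IOException {
      // abort == true means the task failed; a real writer might discard output here.
      out.close();
    }
  }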


        columnMap.put(Bytes.toBytes(columnName), i);
      }
      ++i;
    }

    return new FSRecordWriter() {

      @Override
      public void close(boolean abort) throws IOException {
        try {
          fileWriter.close(null);


                        Path outPath,
                        Class<? extends Writable> valueClass,
                        boolean isCompressed,
                        Properties tableProperties,
                        Progressable progress) throws IOException {
    final FSRecordWriter result =
      super.getHiveRecordWriter(jc, outPath, valueClass, isCompressed,
        tableProperties, progress);
    final Reporter reporter = (Reporter) progress;
    reporter.setStatus("got here");
    System.out.println("Got a reporter " + reporter);
    return new FSRecordWriter() {
      @Override
      public void write(Writable w) throws IOException {
        if (w instanceof Text) {
          Text value = (Text) w;
          Rot13InputFormat.rot13(value.getBytes(), 0, value.getLength());
          result.write(w);
        } else if (w instanceof BytesWritable) {
          BytesWritable value = (BytesWritable) w;
          Rot13InputFormat.rot13(value.getBytes(), 0, value.getLength());
          result.write(w);
        } else {
          throw new IllegalArgumentException("need Text or BytesWritable " +
            "instead of " + w.getClass().getName());
        }
      }

      @Override
      public void close(boolean abort) throws IOException {
        result.close(abort);
      }
    };
  }

      throw new HiveException(e);
    }

    for (String p : paths) {
      Path path = new Path(p);
      FSRecordWriter writer = HiveFileFormatUtils.getRecordWriter(
          jc, hiveOutputFormat, outputClass, isCompressed,
          tableInfo.getProperties(), path, reporter);
      writer.close(false);
      LOG.info("created empty bucket for enforcing bucketing at " + path);
    }
  }

    // file path.
    newPath = fs.makeQualified(newPath);
    String newFile = newDir + File.separator + "emptyFile";
    Path newFilePath = new Path(newFile);

    FSRecordWriter recWriter = outFileFormat.newInstance().getHiveRecordWriter(job, newFilePath,
        Text.class, false, props, null);
    if (dummyRow) {
      // Empty files are omitted by CombineHiveInputFormat; for a metadata-only
      // query that effectively makes the partition columns disappear. This could
      // be fixed in other ways, but writing one dummy row seemed the easiest (HIVE-2955).
      recWriter.write(new Text("empty")); // written via HiveIgnoreKeyTextOutputFormat
    }
    recWriter.close(false);

    return newPath;
  }

    final OrcRecordUpdater.KeyIndexBuilder watcher =
        new OrcRecordUpdater.KeyIndexBuilder();
    opts.inspector(options.getInspector())
        .callback(watcher);
    final Writer writer = OrcFile.createWriter(filename, opts);
    return new FSRecordWriter() {
      @Override
      public void write(Writable w) throws IOException {
        OrcStruct orc = (OrcStruct) w;
        watcher.addKey(
            ((IntWritable) orc.getFieldValue(OrcRecordUpdater.OPERATION)).get(),

        }
        fpaths.stat.addToStat(StatsSetupConst.ROW_COUNT, 1);
      }


      FSRecordWriter rowOutWriter = null;

      if (row_count != null) {
        row_count.set(row_count.get() + 1);
      }

      if (!multiFileSpray) {
        rowOutWriter = rowOutWriters[0];
      } else {
        int keyHashCode = 0;
        for (int i = 0; i < partitionEval.length; i++) {
          Object o = partitionEval[i].evaluate(row);
          keyHashCode = keyHashCode * 31
              + ObjectInspectorUtils.hashCode(o, partitionObjectInspectors[i]);
        }
        key.setHashCode(keyHashCode);
        int bucketNum = prtner.getBucket(key, null, totalFiles);
        int idx = bucketMap.get(bucketNum);
        rowOutWriter = rowOutWriters[idx];
      }
      rowOutWriter.write(recordValue);
    } catch (IOException e) {
      throw new HiveException(e);
    } catch (SerDeException e) {
      throw new HiveException(e);
    }
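
The multi-file spray above is plain accumulate-and-mod arithmetic once the per-column hash codes are computed. A standalone model of the bucket choice, assuming the default partitioner maps a key to (hash & Integer.MAX_VALUE) % totalFiles (an assumption about prtner's behavior, not shown in this snippet):

  // Minimal model of the spray bucket choice; SprayBucketModel is a
  // hypothetical name, not a Hive class.
  public final class SprayBucketModel {

    // Accumulates per-column hash codes the same way the loop above does.
    static int keyHashCode(int[] columnHashes) {
      int h = 0;
      for (int columnHash : columnHashes) {
        h = h * 31 + columnHash;
      }
      return h;
    }

    // Maps the accumulated hash to a file index in [0, totalFiles),
    // assuming default hash-partitioner semantics.
    static int bucket(int keyHashCode, int totalFiles) {
      return (keyHashCode & Integer.MAX_VALUE) % totalFiles;
    }

    public static void main(String[] args) {
      int hash = keyHashCode(new int[] {42, -7, 1001});
      System.out.println("row goes to bucket " + bucket(hash, 16));
    }
  }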

          prevFsp.closeWriters(false);

          // since we are closing the previous fsp's record writers, check whether we can
          // get stats from the record writer and store them in the cached previous fsp
          if (conf.isGatherStats() && isCollectRWStats) {
            FSRecordWriter outWriter = prevFsp.outWriters[0];
            if (outWriter != null) {
              SerDeStats stats = ((StatsProvidingRecordWriter) outWriter).getStats();
              if (stats != null) {
                prevFsp.stat.addToStat(StatsSetupConst.RAW_DATA_SIZE, stats.getRawDataSize());
                prevFsp.stat.addToStat(StatsSetupConst.ROW_COUNT, stats.getRowCount());

        // this adds more overhead to the actual processing of each row. But if the
        // record writer already gathers the statistics, it can simply return the
        // accumulated statistics, which will be aggregated in the case of spray writers
        if (conf.isGatherStats() && isCollectRWStats) {
          for (int idx = 0; idx < fsp.outWriters.length; idx++) {
            FSRecordWriter outWriter = fsp.outWriters[idx];
            if (outWriter != null) {
              SerDeStats stats = ((StatsProvidingRecordWriter) outWriter).getStats();
              if (stats != null) {
                fsp.stat.addToStat(StatsSetupConst.RAW_DATA_SIZE, stats.getRawDataSize());
                fsp.stat.addToStat(StatsSetupConst.ROW_COUNT, stats.getRowCount());
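
Both stats snippets above cast the writer to StatsProvidingRecordWriter and read a SerDeStats from it. A minimal sketch of a writer that satisfies that cast, assuming StatsProvidingRecordWriter adds a single getStats() method on top of FSRecordWriter (consistent with how it is used above); the raw-data-size accounting is illustrative:

  import java.io.IOException;

  import org.apache.hadoop.hive.ql.io.FSRecordWriter;
  import org.apache.hadoop.hive.ql.io.StatsProvidingRecordWriter;
  import org.apache.hadoop.hive.serde2.SerDeStats;
  import org.apache.hadoop.io.Text;
  import org.apache.hadoop.io.Writable;

  public class CountingRecordWriter implements StatsProvidingRecordWriter {

    private final FSRecordWriter delegate;   // the real writer being wrapped
    private final SerDeStats stats = new SerDeStats();
    private long rowCount;
    private long rawDataSize;

    public CountingRecordWriter(FSRecordWriter delegate) {
      this.delegate = delegate;
    }

    @Override
    public void write(Writable w) throws IOException {
      // Count every row; for Text rows, also count their byte length.
      rowCount++;
      if (w instanceof Text) {
        rawDataSize += ((Text) w).getLength();
      }
      delegate.write(w);
    }

    @Override
    public void close(boolean abort) throws IOException {
      delegate.close(abort);
    }

    @Override
    public SerDeStats getStats() {
      stats.setRowCount(rowCount);
      stats.setRawDataSize(rawDataSize);
      return stats;
    }
  }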

          ObjectInspectorFactory.getReflectionObjectInspector(MyRow.class,
              ObjectInspectorFactory.ObjectInspectorOptions.JAVA);
    }
    SerDe serde = new OrcSerde();
    HiveOutputFormat<?, ?> outFormat = new OrcOutputFormat();
    FSRecordWriter writer =
        outFormat.getHiveRecordWriter(conf, testFilePath, MyRow.class, true,
            properties, Reporter.NULL);
    writer.write(serde.serialize(new MyRow(1, 2), inspector));
    writer.write(serde.serialize(new MyRow(2, 2), inspector));
    writer.write(serde.serialize(new MyRow(3, 2), inspector));
    writer.close(true);
    serde = new OrcSerde();
    properties.setProperty("columns", "x,y");
    properties.setProperty("columns.types", "int:int");
    serde.initialize(conf, properties);
    assertEquals(OrcSerde.OrcSerdeRow.class, serde.getSerializedClass());

  @Test
  @SuppressWarnings("deprecation")
  public void testEmptyFile() throws Exception {
    Properties properties = new Properties();
    HiveOutputFormat<?, ?> outFormat = new OrcOutputFormat();
    FSRecordWriter writer =
        outFormat.getHiveRecordWriter(conf, testFilePath, MyRow.class, true,
            properties, Reporter.NULL);
    writer.close(true);
    properties.setProperty("columns", "x,y");
    properties.setProperty("columns.types", "int:int");
    SerDe serde = new OrcSerde();
    serde.initialize(conf, properties);
    InputFormat<?, ?> in = new OrcInputFormat();
