Package org.apache.hadoop.mapred

Examples of org.apache.hadoop.mapred.Reporter
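
In the old mapred API, a Reporter is handed to every Mapper.map() and
Reducer.reduce() call. It extends Progressable and lets task code signal
liveness (progress()), publish a status string, and update counters. A minimal
sketch of a mapper that exercises those calls (class and counter names are
illustrative, not taken from the examples below):

import java.io.IOException;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapred.MapReduceBase;
import org.apache.hadoop.mapred.Mapper;
import org.apache.hadoop.mapred.OutputCollector;
import org.apache.hadoop.mapred.Reporter;

public class LineCountingMapper extends MapReduceBase
    implements Mapper<LongWritable, Text, Text, IntWritable> {

  enum MyCounters { LINES } // illustrative custom counter

  public void map(LongWritable key, Text value,
      OutputCollector<Text, IntWritable> output, Reporter reporter)
      throws IOException {
    reporter.incrCounter(MyCounters.LINES, 1); // bump a custom counter
    reporter.setStatus("at offset " + key);    // status string shown in the task UI
    reporter.progress();                       // tell the framework we are alive
    output.collect(value, new IntWritable(1));
  }
}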


      InputSplit split = splits[i];
      Assert.assertTrue(split instanceof TableSnapshotInputFormat.TableSnapshotRegionSplit);

      // validate record reader
      OutputCollector collector = mock(OutputCollector.class);
      Reporter reporter = mock(Reporter.class);
      RecordReader<ImmutableBytesWritable, Result> rr = tsif.getRecordReader(split, job, reporter);

      // validate we can read all the data back
      ImmutableBytesWritable key = rr.createKey();
      Result value = rr.createValue();
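
A typical loop to drain such a reader back out (a sketch of what the truncated
test presumably does, not the original code):

int rows = 0;
while (rr.next(key, value)) { // mapred RecordReader returns false at end of input
  Assert.assertNotNull(value);
  rows++;
}
rr.close();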


    JobConf job = new JobConf(conf);
    FileSystem fs = FileSystem.getLocal(conf);
    Path dir = new Path(System.getProperty("test.build.data",".") + "/mapred");
    Path file = new Path(dir, "test.seq");
   
    Reporter reporter = new Reporter() {
        public void setStatus(String status) throws IOException {}
        public void progress() throws IOException {}
      };
   
    int seed = new Random().nextInt();
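
The anonymous class above stubs an early, two-method version of the interface;
current Hadoop's Reporter also declares counter, input-split, and progress
accessors, so tests that don't inspect progress usually reach for the built-in
no-op instance instead:

Reporter reporter = Reporter.NULL; // no-op Reporter shipped with the mapred API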

   * @throws IOException I/O errors
   */
  public org.apache.hadoop.mapred.RecordReader<WritableComparable, Writable>
  getBaseRecordReader(JobConf jobConf, Progressable progressable)
    throws IOException {
    Reporter reporter = new ProgressReporter(progressable);
    return baseInputFormat.getRecordReader(baseSplit, jobConf, reporter);
  }
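
ProgressReporter above adapts a plain Progressable (all that the new-API
context exposes) to the old Reporter interface. A minimal sketch of such an
adapter, assuming only progress() needs forwarding (the real HCatalog class
may differ):

import org.apache.hadoop.mapred.Counters;
import org.apache.hadoop.mapred.InputSplit;
import org.apache.hadoop.mapred.Reporter;
import org.apache.hadoop.util.Progressable;

class ProgressReporterSketch implements Reporter {
  private final Progressable progressable;

  ProgressReporterSketch(Progressable progressable) {
    this.progressable = progressable;
  }

  public void progress() { progressable.progress(); } // the one forwarded call

  // the rest of the interface is stubbed out
  public void setStatus(String status) {}
  public Counters.Counter getCounter(Enum<?> name) { return null; }
  public Counters.Counter getCounter(String group, String name) { return null; }
  public void incrCounter(Enum<?> key, long amount) {}
  public void incrCounter(String group, String counter, long amount) {}
  public InputSplit getInputSplit() {
    throw new UnsupportedOperationException("no input split for this task");
  }
  public float getProgress() { return 0f; }
}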

    int fileId = CREATED_FILES_COUNTER.incrementAndGet();
    String name = FileOutputFormat.getUniqueName(jobConf, "part-" + fileId);
    if (LOG.isInfoEnabled()) {
      LOG.info("getBaseRecordWriter: created new record writer for file {}", name);
    }
    Reporter reporter = new ProgressReporter(taskAttemptContext);
    // the FileSystem argument is ignored by FileOutputFormat-based formats, hence null
    return baseOutputFormat.getRecordWriter(null, jobConf, name, reporter);
  }

    org.apache.hadoop.mapred.InputFormat inputFormat =
                              getMapRedInputFormat(jobConf, inputFormatClass);

    Map<String, String> jobProperties = partitionInfo.getJobProperties();
    HCatUtil.copyJobPropertiesToJobConf(jobProperties, jobConf);
    Reporter reporter = InternalUtil.createReporter(taskContext);
    org.apache.hadoop.mapred.RecordReader recordReader =
      inputFormat.getRecordReader(hcatSplit.getBaseSplit(), jobConf, reporter);

    SerDe serde;
    try {

    }

    @Override
    public void close(TaskAttemptContext context) throws IOException,
            InterruptedException {
        Reporter reporter = InternalUtil.createReporter(context);
        if (dynamicPartitioningUsed){
            for (org.apache.hadoop.mapred.RecordWriter<? super WritableComparable<?>, ? super Writable> bwriter : baseDynamicWriters.values()){
                // We are in RecordWriter.close(), so the context should be a TaskInputOutputContext
                bwriter.close(reporter);
            }

          public void collect(Object key, Object value)
            throws IOException {
            //just consume it, no need to write the record anywhere
          }
        };
        Reporter reporter = Reporter.NULL; // dummy no-op reporter
        startOutputThreads(collector, reporter);
      }
      int exitVal = sim.waitFor();
      // how'd it go?
      if (exitVal != 0) {

    Configuration conf;
    JobConf job;
    FileSystem fs;
    Path dir;
    Path file;
    Reporter reporter;
    FSDataOutputStream ds;

    try {
      //
      // create job and filesystem and reporter and such.

  public void shouldNotCallCollectonSinceFindUniqueKeyValueMoreThanOnes()
      throws Exception {
    GroupingTableMap gTableMap = null;
    try {
      Result result = mock(Result.class);
      Reporter reporter = mock(Reporter.class);
      gTableMap = new GroupingTableMap();
      Configuration cfg = new Configuration();
      cfg.set(GroupingTableMap.GROUP_COLUMNS, "familyA:qualifierA familyB:qualifierB");
      JobConf jobConf = new JobConf(cfg);
      gTableMap.configure(jobConf);
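
The truncated remainder presumably builds a Result that trips the
duplicate-match case and then asserts collect() is never called. A sketch of
that assertion with Mockito (the collector and key here are illustrative, not
the original test's variables):

@SuppressWarnings("unchecked")
OutputCollector<ImmutableBytesWritable, Result> collector = mock(OutputCollector.class);
ImmutableBytesWritable key = new ImmutableBytesWritable("row".getBytes());
gTableMap.map(key, result, collector, reporter);
verify(collector, never()).collect(any(ImmutableBytesWritable.class), any(Result.class));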
