Examples of TaskAttemptID


Examples of org.apache.hadoop.mapreduce.TaskAttemptID

        if(reader != null){
            reader.close();
        }
        InputSplit curSplit = inpSplits.get(curSplitIndex);
        TaskAttemptContext tAContext = new TaskAttemptContext(conf,
                new TaskAttemptID());
        reader = inputFormat.createRecordReader(curSplit, tAContext);
        reader.initialize(curSplit, tAContext);
        // create a dummy pigsplit - other than the actual split, the other
        // params are really not needed here where we are just reading the
        // input completely
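
The pattern above drives a RecordReader outside of a real task by handing it a TaskAttemptContext built from a throwaway TaskAttemptID. Below is a minimal, self-contained sketch of the same idea, written against the pre-0.21 API used in the snippet (where TaskAttemptContext is a concrete class; on 0.21+/2.x substitute org.apache.hadoop.mapreduce.task.TaskAttemptContextImpl); the TextInputFormat choice and the input path taken from args[0] are illustrative assumptions:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.InputSplit;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.RecordReader;
import org.apache.hadoop.mapreduce.TaskAttemptContext;
import org.apache.hadoop.mapreduce.TaskAttemptID;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.input.TextInputFormat;

public class DummyContextReader {
    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        Job job = new Job(conf);
        FileInputFormat.addInputPath(job, new Path(args[0]));  // input path is a placeholder

        TextInputFormat inputFormat = new TextInputFormat();
        // A fresh TaskAttemptID is enough here: nothing ever inspects it, it
        // only satisfies the TaskAttemptContext constructor.
        TaskAttemptContext ctx = new TaskAttemptContext(conf, new TaskAttemptID());

        for (InputSplit split : inputFormat.getSplits(job)) {
            RecordReader<LongWritable, Text> reader =
                inputFormat.createRecordReader(split, ctx);
            reader.initialize(split, ctx);
            while (reader.nextKeyValue()) {
                System.out.println(reader.getCurrentValue());
            }
            reader.close();
        }
    }
}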

Examples of org.apache.hadoop.mapreduce.TaskAttemptID

  private TaskAttemptContext getTestTaskAttemptContext(final Job job)
  throws IOException, Exception {
    TaskAttemptContext context;
    if (isPost020MapReduce()) {
      TaskAttemptID id =
        TaskAttemptID.forName("attempt_200707121733_0001_m_000000_0");
      Class<?> clazz =
        Class.forName("org.apache.hadoop.mapreduce.task.TaskAttemptContextImpl");
      Constructor<?> c = clazz.
          getConstructor(Configuration.class, TaskAttemptID.class);
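
The reflection above is only there so the test compiles against both pre- and post-0.20 MapReduce; the TaskAttemptID itself comes from TaskAttemptID.forName, which parses the canonical string attempt_<jtIdentifier>_<jobId>_<m|r>_<taskId>_<attemptId>. A minimal sketch of parsing and inspecting such an id, reusing the sample string from the snippet:

import org.apache.hadoop.mapreduce.TaskAttemptID;

public class AttemptIdParsing {
    public static void main(String[] args) {
        // Parse the canonical form:
        // attempt_<jtIdentifier>_<jobId>_<m|r>_<taskId>_<attemptId>
        TaskAttemptID id = TaskAttemptID.forName("attempt_200707121733_0001_m_000000_0");

        System.out.println(id.getJobID());   // job_200707121733_0001
        System.out.println(id.getTaskID());  // task_200707121733_0001_m_000000
        System.out.println(id.getId());      // 0  (the attempt number)
        System.out.println(id);              // round-trips to the original string
    }
}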

Examples of org.apache.hadoop.mapreduce.TaskAttemptID

        private boolean init  = false;

        public IllustratorContext(Configuration conf, DataBag input,
              List<Pair<PigNullableWritable, Writable>> output,
              InputSplit split) throws IOException, InterruptedException {
              super(conf, new TaskAttemptID(), null, null, null, null, split);
              if (output == null)
                  throw new IOException("Null output can not be used");
              this.input = input;
              this.output = output;
        }
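
IllustratorContext only needs some TaskAttemptID to satisfy its superclass constructor, so the no-arg new TaskAttemptID() serves as a placeholder. A small sketch contrasting that placeholder with an explicitly built id, assuming the TaskAttemptID(String, int, boolean, int, int) constructor from the 1.x API (still present, though deprecated, in 2.x):

import org.apache.hadoop.mapreduce.TaskAttemptID;

public class PlaceholderAttemptId {
    public static void main(String[] args) {
        // No-arg placeholder: sufficient wherever the framework merely needs
        // *an* id and never inspects it, as in the fake contexts above.
        TaskAttemptID placeholder = new TaskAttemptID();
        System.out.println(placeholder);

        // When a realistic id matters, build one explicitly:
        // (jtIdentifier, jobId, isMap, taskId, attemptId)
        TaskAttemptID explicit = new TaskAttemptID("200707121733", 1, true, 0, 0);
        System.out.println(explicit);  // attempt_200707121733_0001_m_000000_0
    }
}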

Examples of org.apache.hadoop.mapreduce.TaskAttemptID

            public IllustratorContext(Job job,
                  List<Pair<PigNullableWritable, Writable>> input,
                  POPackage pkg
                  ) throws IOException, InterruptedException {
                super(job.getJobConf(), new TaskAttemptID(), new FakeRawKeyValueIterator(input.iterator().hasNext()),
                    null, null, null, null, null, null, PigNullableWritable.class, NullableTuple.class);
                bos = new ByteArrayOutputStream();
                dos = new DataOutputStream(bos);
                org.apache.hadoop.mapreduce.Job nwJob = new org.apache.hadoop.mapreduce.Job(job.getJobConf());
                sortComparator = nwJob.getSortComparator();

Examples of org.apache.hadoop.mapreduce.TaskAttemptID

      final Accumulator<Map<String, Long>> counters) {
    ProxyFactory factory = new ProxyFactory();
    Class<TaskInputOutputContext> superType = TaskInputOutputContext.class;
    Class[] types = new Class[0];
    Object[] args = new Object[0];
    final TaskAttemptID taskAttemptId = new TaskAttemptID();
    if (superType.isInterface()) {
      factory.setInterfaces(new Class[] { superType });
    } else {
      types = new Class[] { Configuration.class, TaskAttemptID.class, RecordWriter.class, OutputCommitter.class,
          StatusReporter.class };

Examples of org.apache.hadoop.mapreduce.TaskAttemptID

  private static TaskInputOutputContext<?, ?, ?, ?> getInMemoryContext(final Configuration conf) {
    ProxyFactory factory = new ProxyFactory();
    Class<TaskInputOutputContext> superType = TaskInputOutputContext.class;
    Class[] types = new Class[0];
    Object[] args = new Object[0];
    final TaskAttemptID taskAttemptId = new TaskAttemptID();
    if (superType.isInterface()) {
      factory.setInterfaces(new Class[] { superType });
    } else {
      types = new Class[] { Configuration.class, TaskAttemptID.class, RecordWriter.class, OutputCommitter.class,
          StatusReporter.class };
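
The two snippets above construct an in-memory TaskInputOutputContext with a Javassist ProxyFactory, picking constructor argument types depending on whether the context type is an interface (Hadoop 2.x) or an abstract class (1.x); the dummy TaskAttemptID fills one of the constructor slots in the class case. A hedged sketch of how such a proxy could be completed, assuming javassist.util.proxy is available and that leaving the remaining context methods unimplemented (returning null) is acceptable for in-memory use:

import java.lang.reflect.Method;

import javassist.util.proxy.MethodHandler;
import javassist.util.proxy.ProxyFactory;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.mapreduce.OutputCommitter;
import org.apache.hadoop.mapreduce.RecordWriter;
import org.apache.hadoop.mapreduce.StatusReporter;
import org.apache.hadoop.mapreduce.TaskAttemptID;
import org.apache.hadoop.mapreduce.TaskInputOutputContext;

public class InMemoryContextSketch {
    @SuppressWarnings({ "rawtypes", "unchecked" })
    static TaskInputOutputContext<?, ?, ?, ?> create(final Configuration conf) throws Exception {
        ProxyFactory factory = new ProxyFactory();
        Class<TaskInputOutputContext> superType = TaskInputOutputContext.class;
        Class[] types = new Class[0];
        Object[] args = new Object[0];
        final TaskAttemptID taskAttemptId = new TaskAttemptID();

        if (superType.isInterface()) {
            // Hadoop 2.x: the context is an interface, so the proxy just
            // implements it and needs no constructor arguments.
            factory.setInterfaces(new Class[] { superType });
        } else {
            // Hadoop 1.x: the context is an abstract class whose constructor
            // takes a Configuration, the dummy TaskAttemptID, and collaborators
            // that can stay null for in-memory use.
            factory.setSuperclass(superType);
            types = new Class[] { Configuration.class, TaskAttemptID.class,
                    RecordWriter.class, OutputCommitter.class, StatusReporter.class };
            args = new Object[] { conf, taskAttemptId, null, null, null };
        }

        MethodHandler handler = new MethodHandler() {
            @Override
            public Object invoke(Object self, Method method, Method proceed, Object[] callArgs) {
                if ("getConfiguration".equals(method.getName())) {
                    return conf;
                }
                if ("getTaskAttemptID".equals(method.getName())) {
                    return taskAttemptId;
                }
                return null; // everything else is left unimplemented in this sketch
            }
        };
        return (TaskInputOutputContext<?, ?, ?, ?>) factory.create(types, args, handler);
    }
}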

Examples of org.apache.hadoop.mapreduce.TaskAttemptID

    InputSplit split = iF.getSplits(jc).get(0);
    sd.setOriginalSchema(jc, schema);
    sd.setOutputSchema(jc, schema);
    sd.initialize(jc, getProps());

    TaskAttemptContext tac = new TaskAttemptContext(conf, new TaskAttemptID());
    RecordReader<?,?> rr = iF.createRecordReader(split,tac);
    rr.initialize(split, tac);
    HowlRecord[] tuples = getExpectedRecords();
    for(int j=0; j < 2; j++){
      Assert.assertTrue(rr.nextKeyValue());

Examples of org.apache.hadoop.mapreduce.TaskAttemptID

    sd.setOriginalSchema(jc, buildHiveSchema());
    sd.setOutputSchema(jc, buildPrunedSchema());

    sd.initialize(jc, getProps());
    conf.set(ColumnProjectionUtils.READ_COLUMN_IDS_CONF_STR,jc.getConfiguration().get(ColumnProjectionUtils.READ_COLUMN_IDS_CONF_STR));
    TaskAttemptContext tac = new TaskAttemptContext(conf, new TaskAttemptID());
    RecordReader<?,?> rr = iF.createRecordReader(split,tac);
    rr.initialize(split, tac);
    HowlRecord[] tuples = getPrunedRecords();
    for(int j=0; j < 2; j++){
      Assert.assertTrue(rr.nextKeyValue());

Examples of org.apache.hadoop.mapreduce.TaskAttemptID

    sd.initialize(jc, getProps());
    Map<String,String> map = new HashMap<String,String>(1);
    map.put("part1", "first-part");
    sd.setPartitionValues(jc, map);
    conf.set(ColumnProjectionUtils.READ_COLUMN_IDS_CONF_STR,jc.getConfiguration().get(ColumnProjectionUtils.READ_COLUMN_IDS_CONF_STR));
    TaskAttemptContext tac = new TaskAttemptContext(conf, new TaskAttemptID());
    RecordReader<?,?> rr = iF.createRecordReader(split,tac);
    rr.initialize(split, tac);
    HowlRecord[] tuples = getReorderedCols();
    for(int j=0; j < 2; j++){
      Assert.assertTrue(rr.nextKeyValue());

Examples of org.apache.hadoop.mapreduce.TaskAttemptID

      context.getConfiguration().setLong("mapred.max.split.size",maxSplitSize);
      List<InputSplit> splits = inputFormat.getSplits(context);
      assertEquals("splits length should be " + splitNumber, splitNumber, splits.size());
      int readCount = 0;
      for (int i = 0; i < splits.size(); i++) {
        TaskAttemptContext tac = new TaskAttemptContext(jonconf, new TaskAttemptID());
        RecordReader<LongWritable, BytesRefArrayWritable> rr = inputFormat.createRecordReader(splits.get(i), tac);
        rr.initialize(splits.get(i), tac);
        while (rr.nextKeyValue()) {
          readCount++;
        }