Examples of TaskAttemptID
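TaskAttemptID uniquely identifies one attempt at executing a task: it combines a TaskID (itself a JobID plus a task type and task number) with an attempt counter, and it serializes to strings of the form attempt_<jtIdentifier>_<jobid>_<m|r>_<taskid>_<attemptid>. The snippets below show how real projects construct dummy or real attempt IDs, usually to build a TaskAttemptContext outside of a running MapReduce job. As a warm-up, a minimal sketch not taken from any of the snippets (it assumes Hadoop 2.x, where the TaskType enum replaces the older boolean "isMap" flag):

    import org.apache.hadoop.mapreduce.TaskAttemptID;
    import org.apache.hadoop.mapreduce.TaskType;

    public class TaskAttemptIdDemo {
        public static void main(String[] args) {
            // Build an ID from its parts: jtIdentifier "local", job 1, map task 0, attempt 0.
            TaskAttemptID built = new TaskAttemptID("local", 1, TaskType.MAP, 0, 0);
            System.out.println(built);                // attempt_local_0001_m_000000_0

            // Parse the canonical string form back into an ID.
            TaskAttemptID parsed = TaskAttemptID.forName("attempt_local_0001_m_000000_0");
            System.out.println(parsed.getTaskID());   // task_local_0001_m_000000
            System.out.println(parsed.getJobID());    // job_local_0001
        }
    }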


Examples of org.apache.hadoop.mapreduce.TaskAttemptID

    //
    // Old Hadoop API
    //
    public org.apache.hadoop.mapred.InputSplit[] getSplits(JobConf jobConf, int numSplits) throws IOException
    {
        // Bridge the old API to the new one: wrap the JobConf and a dummy
        // TaskAttemptID in a TaskAttemptContext, then downcast each new-API split.
        TaskAttemptContext tac = new TaskAttemptContext(jobConf, new TaskAttemptID());
        List<org.apache.hadoop.mapreduce.InputSplit> newInputSplits = this.getSplits(tac);
        org.apache.hadoop.mapred.InputSplit[] oldInputSplits = new org.apache.hadoop.mapred.InputSplit[newInputSplits.size()];
        for (int i = 0; i < newInputSplits.size(); i++)
            oldInputSplits[i] = (ColumnFamilySplit) newInputSplits.get(i);
        return oldInputSplits;
    }

Examples of org.apache.hadoop.mapreduce.TaskAttemptID

    context.getConfiguration().setLong("mapred.max.split.size", maxSplitSize);
    List<InputSplit> splits = inputFormat.getSplits(context);
    assertEquals("splits length should be " + splitNumber, splitNumber, splits.size());
    int readCount = 0;
    for (int i = 0; i < splits.size(); i++) {
      // Build a TaskAttemptContext around a dummy TaskAttemptID via the HCatalog shim.
      TaskAttemptContext tac = ShimLoader.getHadoopShims().getHCatShim().createTaskAttemptContext(jonconf, new TaskAttemptID());
      RecordReader<LongWritable, BytesRefArrayWritable> rr = inputFormat.createRecordReader(splits.get(i), tac);
      rr.initialize(splits.get(i), tac);
      while (rr.nextKeyValue()) {
        readCount++;
      }

Examples of org.apache.hadoop.mapreduce.TaskAttemptID

    List<InputSplit> splits = inputFormat.getSplits(context);
    assertEquals("splits length should be " + splitNumber, splitNumber, splits.size());
    int readCount = 0;
    for (int i = 0; i < splits.size(); i++) {
      TaskAttemptContext tac = ShimLoader.getHadoopShims().getHCatShim().createTaskAttemptContext(jonconf,
          new TaskAttemptID());
      RecordReader<LongWritable, BytesRefArrayWritable> rr = inputFormat.createRecordReader(splits.get(i), tac);
      rr.initialize(splits.get(i), tac);
      while (rr.nextKeyValue()) {
        readCount++;
      }

Examples of org.apache.hadoop.mapreduce.TaskAttemptID

  public Iterator<HCatRecord> read() throws HCatException {

    HCatInputFormat inpFmt = new HCatInputFormat();
    RecordReader<WritableComparable, HCatRecord> rr;
    try {
      TaskAttemptContext cntxt = ShimLoader.getHadoopShims().getHCatShim().createTaskAttemptContext(conf, new TaskAttemptID());
      rr = inpFmt.createRecordReader(split, cntxt);
      rr.initialize(split, cntxt);
    } catch (IOException e) {
      throw new HCatException(ErrorType.ERROR_NOT_INITIALIZED, e);
    } catch (InterruptedException e) {
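On Hadoop 2.x the shim indirection above is unnecessary, because the new-API TaskAttemptContext has a public concrete implementation. A minimal sketch of the equivalent call, under that assumption:

    // Hadoop 2.x only: TaskAttemptContextImpl is the concrete new-API context,
    // so a dummy TaskAttemptID can be wrapped directly, without the HCatalog shim.
    TaskAttemptContext cntxt =
        new org.apache.hadoop.mapreduce.task.TaskAttemptContextImpl(conf, new TaskAttemptID());

The shim exists because TaskAttemptContext changed from a class in Hadoop 1.x to an interface in 2.x, so code compiled against one line cannot instantiate it on the other.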

Examples of org.apache.hadoop.mapreduce.TaskAttemptID

    int id = sp.getId();
    setVarsInConf(id);
    HCatOutputFormat outFormat = new HCatOutputFormat();
    // Give each writer a distinct attempt by embedding this writer's id in the TaskAttemptID.
    TaskAttemptContext cntxt = ShimLoader.getHadoopShims().getHCatShim().createTaskAttemptContext(
        conf, new TaskAttemptID(ShimLoader.getHadoopShims().getHCatShim().createTaskID(), id));
    OutputCommitter committer = null;
    RecordWriter<WritableComparable<?>, HCatRecord> writer;
    try {
      committer = outFormat.getOutputCommitter(cntxt);
      committer.setupTask(cntxt);

Examples of org.apache.hadoop.mapreduce.TaskAttemptID

    oc.commitJob(ojob);
  }

  private TaskAttemptContext createTaskAttemptContext(Configuration tconf) {
    Configuration conf = (tconf == null) ? new Configuration() : tconf;
    TaskAttemptID taskId = new TaskAttemptID();
    conf.setInt("mapred.task.partition", taskId.getId());
    // Synthesize the canonical attempt-id string; the empty jtIdentifier yields "attempt__...".
    conf.set("mapred.task.id", "attempt__0000_r_000000_" + taskId.getId());
    return HCatMapRedUtil.createTaskAttemptContext(conf, taskId);
  }
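The string written to mapred.task.id above follows TaskAttemptID's canonical layout; the double underscore comes from the empty jtIdentifier of a default-constructed TaskAttemptID. A small sketch (assumed, not from the original source) showing that such a string round-trips through the parser:

    // The layout is attempt_<jtIdentifier>_<jobid>_<m|r>_<taskid>_<attemptid>;
    // here jtIdentifier is the empty string, hence "attempt__0000...".
    TaskAttemptID parsed = TaskAttemptID.forName("attempt__0000_r_000000_0");
    System.out.println(parsed.getTaskID());  // task__0000_r_000000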

Examples of org.apache.hadoop.mapreduce.TaskAttemptID

      job.getCredentials().addAll(parentContext.getCredentials());
      success = job.waitForCompletion(true);
      fs.delete(workDir, true);
      // We only clean up on success, because a failure may have been caused by the target directory already existing.
      if (localMode && success) {
        new ImporterOutputFormat().getOutputCommitter(HCatMapRedUtil.createTaskAttemptContext(conf, new TaskAttemptID())).commitJob(job);
      }
    } catch (InterruptedException e) {
      LOG.error("ImportSequenceFile Failed", e);
    } catch (ClassNotFoundException e) {
      LOG.error("ImportSequenceFile Failed", e);

Examples of org.apache.hadoop.mapreduce.TaskAttemptID

    this.conf = null;
  }

  public void initialize(DoFn<?, ?> fn, Integer tid) {
    if (context == null || !Objects.equal(lastTID, tid)) {
      TaskAttemptID attemptID;
      if (tid != null) {
        // Deprecated boolean form of the TaskID constructor: false marks this as a reduce-side ID.
        TaskID taskId = new TaskID(new JobID(jobName, 0), false, tid);
        attemptID = new TaskAttemptID(taskId, 0);
        lastTID = tid;
      } else {
        attemptID = new TaskAttemptID();
        lastTID = null;
      }
      configureLocalFiles();
      context = TaskInputOutputContextFactory.create(getConfiguration(), attemptID, new SparkReporter(counters));
    }
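The boolean TaskID constructor used above is deprecated in Hadoop 2.x. A hedged sketch of the modern equivalent, assuming the same jobName and tid as in the snippet:

    // Hadoop 2.x form: TaskType replaces the deprecated boolean flag.
    TaskID taskId = new TaskID(new JobID(jobName, 0), TaskType.REDUCE, tid);
    TaskAttemptID attemptID = new TaskAttemptID(taskId, 0);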

Examples of org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId

        mapEventIdx = job.mapAttemptCompletionEvents.size();
        job.mapAttemptCompletionEvents.add(TypeConverter.fromYarn(tce));
      }
      job.taskCompletionIdxToMapCompletionIdx.add(mapEventIdx);
     
      TaskAttemptId attemptId = tce.getAttemptId();
      TaskId taskId = attemptId.getTaskId();
      // Mark the previous successful completion event, if any, as obsolete.
      Integer successEventNo =
          job.successAttemptCompletionEventNoMap.remove(taskId);
      if (successEventNo != null) {
        TaskAttemptCompletionEvent successEvent =
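Note that this TaskAttemptId (lower-case "d") is a YARN protocol record from org.apache.hadoop.mapreduce.v2.api.records, not the classic org.apache.hadoop.mapreduce.TaskAttemptID used in the earlier snippets. TypeConverter, already used above for the completion event, also bridges the two ID types; a minimal sketch, assuming hadoop-mapreduce-client-common is on the classpath:

    // Classic ID -> YARN v2 record, and back again.
    org.apache.hadoop.mapreduce.TaskAttemptID classic =
        org.apache.hadoop.mapreduce.TaskAttemptID.forName("attempt_local_0001_m_000000_0");
    org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId yarn =
        TypeConverter.toYarn(classic);
    org.apache.hadoop.mapred.TaskAttemptID roundTripped = TypeConverter.fromYarn(yarn);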

Examples of org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId

    // create the map container request
    ContainerRequestEvent event = createReq(jobId, 1, 1024,
        new String[] { "h1" });
    allocator.sendRequest(event);
    TaskAttemptId attemptId = event.getAttemptID();
   
    TaskAttempt mockTaskAttempt = mock(TaskAttempt.class);
    when(mockTaskAttempt.getNodeId()).thenReturn(nm1.getNodeId());
    Task mockTask = mock(Task.class);
    when(mockTask.getAttempt(attemptId)).thenReturn(mockTaskAttempt);
    when(mockJob.getTask(attemptId.getTaskId())).thenReturn(mockTask);

    // this tells the scheduler about the requests
    List<TaskAttemptContainerAssignedEvent> assigned = allocator.schedule();
    dispatcher.await();