Package org.apache.hadoop.mapred

Examples of org.apache.hadoop.mapred.TaskAttemptID
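A TaskAttemptID identifies one attempt at executing one task: it combines a TaskID (itself built from a job identifier, a map/reduce task type and a task number) with an attempt number, and round-trips through the canonical string form attempt_<jtIdentifier>_<jobId>_<m|r>_<taskId>_<attemptId>. Before the excerpts below, here is a minimal, self-contained sketch; the identifier string, numbers and class name are made up, and it assumes Hadoop 2.x, where the TaskType-based constructors replace the deprecated boolean isMap forms:

    import org.apache.hadoop.mapred.TaskAttemptID;
    import org.apache.hadoop.mapreduce.TaskType;

    public class TaskAttemptIDDemo {
      public static void main(String[] args) {
        // Attempt #0 of map task #3 of job 0001 under a made-up
        // jobtracker/cluster identifier.
        TaskAttemptID attempt =
            new TaskAttemptID("201812180000", 1, TaskType.MAP, 3, 0);
        System.out.println(attempt);             // attempt_201812180000_0001_m_000003_0
        System.out.println(attempt.getTaskID()); // task_201812180000_0001_m_000003
        System.out.println(attempt.getJobID());  // job_201812180000_0001

        // forName parses the canonical string form back into a typed id.
        TaskAttemptID parsed = TaskAttemptID.forName(attempt.toString());
        System.out.println(parsed.equals(attempt)); // true
      }
    }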


Resolving the current attempt from the job configuration in the Hadoop pipes runner, and wiring the child command up to the task logs:

    cmd.add(executable);
    // We are starting the map/reduce task of a pipes job, not a cleanup
    // attempt (hence isCleanup = false below). Recover the attempt id the
    // framework published in the configuration, then wrap the command so
    // its stdout/stderr are captured in the task log files.
    TaskAttemptID taskid =
      TaskAttemptID.forName(conf.get(MRJobConfig.TASK_ATTEMPT_ID));
    File stdout = TaskLog.getTaskLogFile(taskid, false, TaskLog.LogName.STDOUT);
    File stderr = TaskLog.getTaskLogFile(taskid, false, TaskLog.LogName.STDERR);
    long logLength = TaskLog.getTaskLogLength(conf);
    cmd = TaskLog.captureOutAndError(null, cmd, stdout, stderr, logLength,
        // ... (call truncated in the original excerpt)
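MRJobConfig.TASK_ATTEMPT_ID is the mapreduce.task.attempt.id property, which the framework sets in each task's configuration; TaskAttemptID.forName parses that canonical attempt_* string back into a typed id, throwing IllegalArgumentException if the string is malformed.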


        "Split information not expected in MRInput");
    Configuration conf =
      MRHelpers.createConfFromByteString(mrUserPayload.getConfigurationBytes());
    this.jobConf = new JobConf(conf);

    TaskAttemptID taskAttemptId = new TaskAttemptID(
      new TaskID(
        Long.toString(inputContext.getApplicationId().getClusterTimestamp()),
        inputContext.getApplicationId().getId(), TaskType.MAP,
        inputContext.getTaskIndex()),
      inputContext.getTaskAttemptNumber());

    jobConf.set(MRJobConfig.TASK_ATTEMPT_ID,
      taskAttemptId.toString());
    jobConf.setInt(MRJobConfig.APPLICATION_ATTEMPT_ID,
      inputContext.getDAGAttemptNumber());

    // TODO NEWTEZ Rename this to be specific to MRInput. This Input, in
    // theory, can be used by the MapProcessor, ReduceProcessor or a custom
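For concreteness: with a cluster timestamp of 1408112030000, application id 1, task index 3 and attempt number 0 (all made-up values), the id built above renders as

    attempt_1408112030000_0001_m_000003_0

since TaskAttemptID zero-pads the job id to four digits and the task id to six.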

Converting between Tez and MapReduce id types; the excerpt opens on the closing lines of the task-level converter (toMRTaskId), then shows the attempt-level converter in full:

        taskid.getId());
  }

  public static TaskAttemptID toMRTaskAttemptId(
      TezTaskAttemptID taskAttemptId) {
    // Convert the Tez task id, then carry the attempt number straight over.
    return new TaskAttemptID(
        toMRTaskId(taskAttemptId.getTaskID()),
        taskAttemptId.getId());
  }
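The toMRTaskId helper called here is the task-level converter whose tail is visible at the top of the excerpt; converter pairs like this are what let Tez hand MapReduce-style ids to committers, InputFormats and OutputFormats written against the MR API.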

Initializing a Tez map/reduce output: the attempt id is built from the Tez output context and pushed into the JobConf under the standard MapReduce keys, alongside the matching counters.

    this.useNewApi = this.jobConf.getUseNewMapper();
    this.isMapperOutput = jobConf.getBoolean(MRConfig.IS_MAP_PROCESSOR,
        false);
    jobConf.setInt(MRJobConfig.APPLICATION_ATTEMPT_ID,
        outputContext.getDAGAttemptNumber());
    // Unlike MRInput, this output can sit on either side of the job, so
    // the task type is chosen from the processor kind.
    TaskAttemptID taskAttemptId = new TaskAttemptID(new TaskID(Long.toString(
      outputContext.getApplicationId().getClusterTimestamp()),
      outputContext.getApplicationId().getId(),
      (isMapperOutput ? TaskType.MAP : TaskType.REDUCE),
      outputContext.getTaskIndex()),
      outputContext.getTaskAttemptNumber());
    // Publish the attempt, task, job and partition ids under the keys that
    // MapReduce-era code expects to find in the configuration.
    jobConf.set(JobContext.TASK_ATTEMPT_ID, taskAttemptId.toString());
    jobConf.set(JobContext.TASK_ID, taskAttemptId.getTaskID().toString());
    jobConf.setBoolean(JobContext.TASK_ISMAP, isMapperOutput);
    jobConf.setInt(JobContext.TASK_PARTITION,
      taskAttemptId.getTaskID().getId());
    jobConf.set(JobContext.ID, taskAttemptId.getJobID().toString());

    outputRecordCounter = outputContext.getCounters().findCounter(
        TaskCounter.MAP_OUTPUT_RECORDS);
    fileOutputByteCounter = outputContext.getCounters().findCounter(
        FileOutputFormatCounter.BYTES_WRITTEN);
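Anything that later runs against this JobConf can recover the ids from those same keys. A minimal sketch of the read-back side, assuming conf is the JobConf configured above:

    // Recover the ids published by the snippet above.
    TaskAttemptID attempt =
        TaskAttemptID.forName(conf.get(JobContext.TASK_ATTEMPT_ID));
    boolean isMap = conf.getBoolean(JobContext.TASK_ISMAP, true);
    int partition = conf.getInt(JobContext.TASK_PARTITION, -1);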

Initializing a Tez MR-compatible task processor; the attempt id is again assembled from the processor context:

    DeprecatedKeys.init();

    processorContext = context;
    counters = context.getCounters();
    // Same pattern as the input/output snippets: the task type is keyed
    // off the map/reduce side, the rest comes from the context.
    this.taskAttemptId = new TaskAttemptID(
        new TaskID(
            Long.toString(context.getApplicationId().getClusterTimestamp()),
            context.getApplicationId().getId(),
            (isMap ? TaskType.MAP : TaskType.REDUCE),
            context.getTaskIndex()),
        // ... (truncated in the original excerpt; by analogy with the other
        // snippets the final argument is presumably the attempt number)

Manufacturing a synthetic per-thread attempt id outside of any actual MapReduce job, for Hive I/O output:

  public final Configuration conf;
  public final TaskAttemptID taskID;

  public PerThread(Configuration conf) {
    this.conf = HiveUtils.newHiveConf(conf, OutputCmd.class);
    // Deprecated five-argument form: (jtIdentifier, jobId, isMap, taskId,
    // attemptId). A fake identifier plus the current thread id makes the
    // attempt id unique per thread without a real cluster behind it.
    this.taskID = new TaskAttemptID("hiveio_output", 42, true,
        (int) Thread.currentThread().getId(), 0);
  }
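The boolean form is deprecated on Hadoop 2.x in favor of the TaskType-based constructor; a sketch of the equivalent modern call, with the same made-up identifier and job number as above:

    TaskAttemptID taskID = new TaskAttemptID("hiveio_output", 42,
        TaskType.MAP, (int) Thread.currentThread().getId(), 0);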

Polling map-completion events in a MiniMRCluster-style test harness: a dummy attempt id stands in for the reduce attempt that would normally ask the TaskTracker for map completion events.

    public MapTaskCompletionEventsUpdate
            getMapTaskCompletionEventsUpdates(int index, JobID jobId, int max)
            throws IOException {
        String jtId = jobTracker.getJobTracker().getTrackerIdentifier();
        // isMap = false: the dummy id poses as attempt 0 of reduce task 0.
        TaskAttemptID dummy =
                new TaskAttemptID(jtId, jobId.getId(), false, 0, 0);
        return taskTrackerList.get(index).getTaskTracker()
                .getMapCompletionEvents(jobId, 0, max, dummy, null);
    }

A later variant of the Tez output initialization: instead of assembling the attempt id by hand, it is obtained from a mock-id factory, then published under the same JobContext keys as before.

    this.useNewApi = this.jobConf.getUseNewMapper();
    this.isMapperOutput = jobConf.getBoolean(MRConfig.IS_MAP_PROCESSOR,
        false);
    jobConf.setInt(MRJobConfig.APPLICATION_ATTEMPT_ID,
        outputContext.getDAGAttemptNumber());
    TaskAttemptID taskAttemptId = org.apache.tez.mapreduce.hadoop.mapreduce.TaskAttemptContextImpl
        .createMockTaskAttemptID(outputContext, isMapperOutput);
    jobConf.set(JobContext.TASK_ATTEMPT_ID, taskAttemptId.toString());
    jobConf.set(JobContext.TASK_ID, taskAttemptId.getTaskID().toString());
    jobConf.setBoolean(JobContext.TASK_ISMAP, isMapperOutput);
    jobConf.setInt(JobContext.TASK_PARTITION,
      taskAttemptId.getTaskID().getId());
    jobConf.set(JobContext.ID, taskAttemptId.getJobID().toString());

    if (useNewApi) {
      // Give the output part files a unique prefix.
      if (jobConf.get("mapreduce.output.basename") == null) {
        jobConf.set("mapreduce.output.basename", getOutputFileNamePrefix());
        // ... (truncated in the original excerpt)

Another take on the MRInput initialization from above, this time also merging the current user's credentials into the JobConf before the attempt id is built (the excerpt opens mid-statement):

      MRHelpers.createConfFromByteString(mrUserPayload.getConfigurationBytes());
    this.jobConf = new JobConf(conf);
    // Add tokens to the jobConf, in case they are accessed within the
    // RecordReader / InputFormat.
    jobConf.getCredentials().mergeAll(
        UserGroupInformation.getCurrentUser().getCredentials());

    TaskAttemptID taskAttemptId = new TaskAttemptID(
      new TaskID(
        Long.toString(inputContext.getApplicationId().getClusterTimestamp()),
        inputContext.getApplicationId().getId(), TaskType.MAP,
        inputContext.getTaskIndex()),
      inputContext.getTaskAttemptNumber());

    jobConf.set(MRJobConfig.TASK_ATTEMPT_ID, taskAttemptId.toString());
    jobConf.setInt(MRJobConfig.APPLICATION_ATTEMPT_ID,
      inputContext.getDAGAttemptNumber());

    // TODO NEWTEZ Rename this to be specific to MRInput. This Input, in
    // theory, can be used by the MapProcessor, ReduceProcessor or a custom
    // ... (comment truncated in the original excerpt)