Examples of TaskInfo


Examples of org.apache.hadoop.mapreduce.test.system.TaskInfo

  public void testDirCleanupAfterTaskFailed() throws IOException,
          InterruptedException {
    TTClient ttClient = null;
    FileStatus[] filesStatus = null;
    String localTaskDir = null;
    TaskInfo taskInfo = null;
    TaskID tID = null;
    boolean isTempFolderExists = false;
    Path inputDir = new Path("input");
    Path outputDir = new Path("output");
    Configuration conf = new Configuration(cluster.getConf());
    JobConf jconf = new JobConf(conf);
    jconf.setJobName("Task Failed job");
    jconf.setJarByClass(UtilsForTests.class);
    jconf.setMapperClass(FailedMapperClass.class);
    jconf.setNumMapTasks(1);
    jconf.setNumReduceTasks(0);
    jconf.setMaxMapAttempts(1);
    cleanup(inputDir, conf);
    cleanup(outputDir, conf);
    createInput(inputDir, conf);
    FileInputFormat.setInputPaths(jconf, inputDir);
    FileOutputFormat.setOutputPath(jconf, outputDir);
    RunningJob runJob = jobClient.submitJob(jconf);
    JobID id = runJob.getID();
    JobInfo jInfo = remoteJTClient.getJobInfo(id);
   
    int counter = 0;
    while (counter < 60) {
      if (jInfo.getStatus().getRunState() == JobStatus.RUNNING) {
        break;
      } else {
        UtilsForTests.waitFor(1000);
        jInfo = remoteJTClient.getJobInfo(id);
      }
      counter++;
    }
    Assert.assertTrue("Job has not been started for 1 min.", counter != 60);

    JobStatus[] jobStatus = jobClient.getAllJobs();
    String userName = jobStatus[0].getUsername();
    TaskInfo[] taskInfos = remoteJTClient.getTaskInfo(id);
    for (TaskInfo taskinfo : taskInfos) {
      if (!taskinfo.isSetupOrCleanup()) {
        taskInfo = taskinfo;
        break;
      }
    }
    Assert.assertNotNull("No task other than setup/cleanup was found for the job.",
            taskInfo);

    tID = TaskID.downgrade(taskInfo.getTaskID());
    FinishTaskControlAction action = new FinishTaskControlAction(tID);
    String[] taskTrackers = taskInfo.getTaskTrackers();
    counter = 0;
    while (counter < 30) {
      if (taskTrackers.length != 0) {
        break;
      }
      UtilsForTests.waitFor(1000);
      taskInfo = remoteJTClient.getTaskInfo(taskInfo.getTaskID());
      taskTrackers = taskInfo.getTaskTrackers();
      counter++;
    }
    Assert.assertTrue("Task tracker not found.", taskTrackers.length != 0);
    String hostName = taskTrackers[0].split("_")[1];
    hostName = hostName.split(":")[0];
    ttClient = cluster.getTTClient(hostName);
    ttClient.getProxy().sendAction(action);

    counter = 0;
    while (counter < 60) {
      if (taskInfo.getTaskStatus().length > 0) {
        if (taskInfo.getTaskStatus()[0].getRunState()
                == TaskStatus.State.RUNNING) {
          break;
        }
      }
      UtilsForTests.waitFor(1000);
      taskInfo = remoteJTClient.getTaskInfo(taskInfo.getTaskID());
      counter++;
    }
    Assert.assertTrue("Task has not been started for 1 min.",
            counter != 60);

    String localDirs[] = ttClient.getMapredLocalDirs();
    TaskAttemptID taskAttID = new TaskAttemptID(tID, 0);
    for (String localDir : localDirs) {
      localTaskDir = localDir + "/"
              + TaskTracker.getLocalTaskDir(userName,
                      id.toString(), taskAttID.toString());
      filesStatus = ttClient.listStatus(localTaskDir, true);
      if (filesStatus.length > 0) {
        isTempFolderExists = true;
        break;
      }
    }

    taskInfo = remoteJTClient.getTaskInfo(taskInfo.getTaskID());
    Assert.assertTrue("Task Attempt directory " +
            taskAttID + " has not been found while task was running.",
                    isTempFolderExists);
    counter = 0;
    while (counter < 30) {
      UtilsForTests.waitFor(1000);
      taskInfo = remoteJTClient.getTaskInfo(tID);
      counter++;
    }

    Assert.assertEquals("Task status has not been changed to FAILED.",
            taskInfo.getTaskStatus()[0].getRunState(),
                    TaskStatus.State.FAILED);

    filesStatus = ttClient.listStatus(localTaskDir, true);
    Assert.assertTrue("Temporary folder has not been cleanup.",
            filesStatus.length == 0);
View Full Code Here
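
A minimal sketch of the polling pattern the test above repeats in three places: re-fetch the TaskInfo from the JobTracker proxy, inspect its first TaskStatus, and sleep a second. It assumes the same remoteJTClient proxy, UtilsForTests, and JUnit Assert already used in the test; the helper name and the waitSecs parameter are illustrative, not part of the framework.

  // Hedged sketch: poll a system-test TaskInfo until the task reaches RUNNING
  // or the (illustrative) timeout expires. Only calls that already appear in
  // the test above are used.
  private TaskInfo waitUntilTaskRuns(TaskInfo taskInfo, int waitSecs)
          throws IOException {
    for (int second = 0; second < waitSecs; second++) {
      TaskStatus[] statuses = taskInfo.getTaskStatus();
      if (statuses.length > 0
              && statuses[0].getRunState() == TaskStatus.State.RUNNING) {
        return taskInfo;
      }
      UtilsForTests.waitFor(1000);
      taskInfo = remoteJTClient.getTaskInfo(taskInfo.getTaskID());
    }
    Assert.fail("Task has not reached RUNNING within " + waitSecs + " sec.");
    return taskInfo; // not reached; satisfies the compiler
  }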

Examples of org.apache.hadoop.mapreduce.v2.app.webapp.dao.TaskInfo

        }
      }
      if (ttype != null && task.getType() != ttype) {
        continue;
      }
      allTasks.add(new TaskInfo(task));
    }
    return allTasks;
  }
View Full Code Here
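
The fragment above is the tail of a task-listing loop in the MRv2 web services: each Task of a Job is wrapped in a TaskInfo DAO unless a type filter excludes it. A simplified sketch of such a loop follows; collecting into a plain java.util.List instead of the webapp's wrapper type, and the method name, are illustrative assumptions.

  // Simplified sketch: wrap every Task of a Job in the REST TaskInfo DAO,
  // optionally filtering by TaskType. Collecting into a plain List is an
  // illustrative simplification.
  private List<TaskInfo> listTasks(Job job, TaskType ttype) {
    List<TaskInfo> allTasks = new ArrayList<TaskInfo>();
    for (Task task : job.getTasks().values()) {
      if (ttype != null && task.getType() != ttype) {
        continue; // caller asked only for MAP or REDUCE tasks
      }
      allTasks.add(new TaskInfo(task)); // same DAO constructor as the fragment
    }
    return allTasks;
  }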

Examples of org.apache.hadoop.mapreduce.v2.app.webapp.dao.TaskInfo

  public TaskInfo getJobTask(@PathParam("jobid") String jid,
      @PathParam("taskid") String tid) {

    Job job = AMWebServices.getJobFromJobIdString(jid, appCtx);
    Task task = AMWebServices.getTaskFromTaskIdString(tid, job);
    return new TaskInfo(task);

  }
View Full Code Here

Examples of org.apache.hadoop.mapreduce.v2.app.webapp.dao.TaskInfo

      @PathParam("jobid") String jid, @PathParam("taskid") String tid) {

    Job job = getJobFromJobIdString(jid, appCtx);
    checkAccess(job, hsr);
    Task task = getTaskFromTaskIdString(tid, job);
    return new TaskInfo(task);
  }
View Full Code Here

Examples of org.apache.hadoop.tools.rumen.TaskInfo

      long[] specInputBytes = new long[runTasks.length];
      long[] specOutputRecords = new long[runTasks.length];
      long[] specOutputBytes = new long[runTasks.length];

      for (int i = 0; i < runTasks.length; ++i) {
        final TaskInfo specInfo;
        final Counters counters = runTasks[i].getCounters();
        switch (type) {
          case MAP:
             runInputBytes[i] = counters.findCounter("FileSystemCounters",
                 "HDFS_BYTES_READ").getValue() -
                 counters.findCounter(SPLIT_RAW_BYTES).getValue();
             runInputRecords[i] =
               (int)counters.findCounter(MAP_INPUT_RECORDS).getValue();
             runOutputBytes[i] =
               counters.findCounter(MAP_OUTPUT_BYTES).getValue();
             runOutputRecords[i] =
               (int)counters.findCounter(MAP_OUTPUT_RECORDS).getValue();

            specInfo = spec.getTaskInfo(TaskType.MAP, i);
            specInputRecords[i] = specInfo.getInputRecords();
            specInputBytes[i] = specInfo.getInputBytes();
            specOutputRecords[i] = specInfo.getOutputRecords();
            specOutputBytes[i] = specInfo.getOutputBytes();
            System.out.printf(type + " SPEC: %9d -> %9d :: %5d -> %5d\n",
                 specInputBytes[i], specOutputBytes[i],
                 specInputRecords[i], specOutputRecords[i]);
            System.out.printf(type + " RUN:  %9d -> %9d :: %5d -> %5d\n",
                 runInputBytes[i], runOutputBytes[i],
                 runInputRecords[i], runOutputRecords[i]);
            break;
          case REDUCE:
            runInputBytes[i] = 0;
            runInputRecords[i] =
              (int)counters.findCounter(REDUCE_INPUT_RECORDS).getValue();
            runOutputBytes[i] =
              counters.findCounter("FileSystemCounters",
                  "HDFS_BYTES_WRITTEN").getValue();
            runOutputRecords[i] =
              (int)counters.findCounter(REDUCE_OUTPUT_RECORDS).getValue();

            specInfo = spec.getTaskInfo(TaskType.REDUCE, i);
            // There is no reliable counter for reduce input bytes. The
            // variable-length encoding of intermediate records and other noise
            // make this quantity difficult to estimate. The shuffle and spec
            // input bytes are included in debug output for reference, but are
            // not checked
            specInputBytes[i] = 0;
            specInputRecords[i] = specInfo.getInputRecords();
            specOutputRecords[i] = specInfo.getOutputRecords();
            specOutputBytes[i] = specInfo.getOutputBytes();
            System.out.printf(type + " SPEC: (%9d) -> %9d :: %5d -> %5d\n",
                 specInfo.getInputBytes(), specOutputBytes[i],
                 specInputRecords[i], specOutputRecords[i]);
            System.out.printf(type + " RUN:  (%9d) -> %9d :: %5d -> %5d\n",
                 counters.findCounter(REDUCE_SHUFFLE_BYTES).getValue(),
                 runOutputBytes[i], runInputRecords[i], runOutputRecords[i]);
            break;
View Full Code Here
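
The verification above reads the expected figures from a rumen TaskInfo through its four getters and prints them next to the observed counters. A hedged sketch of the kind of per-task record check those figures could feed is shown below; the method name and the use of JUnit asserts are assumptions, and the real test may apply its own tolerances.

  // Hedged sketch: compare observed record counts against the rumen TaskInfo
  // spec. Only getters that appear in the snippet above are used.
  private static void assertRecordsMatchSpec(TaskInfo specInfo,
      long runInputRecords, long runOutputRecords) {
    Assert.assertEquals("input records differ from spec",
        (long) specInfo.getInputRecords(), runInputRecords);
    Assert.assertEquals("output records differ from spec",
        (long) specInfo.getOutputRecords(), runOutputRecords);
  }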

Examples of org.apache.mesos.Protos.TaskInfo

    TaskID taskId = TaskID.newBuilder().setValue(request.request.slave.name).build();

    LOGGER.info("Launching task " + taskId.getValue() + " with URI " +
                joinPaths(jenkinsMaster, SLAVE_JAR_URI_SUFFIX));

    TaskInfo task = TaskInfo
        .newBuilder()
        .setName("task " + taskId.getValue())
        .setTaskId(taskId)
        .setSlaveId(offer.getSlaveId())
        .addResources(
View Full Code Here
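
The builder above is cut off at addResources. A hedged sketch of how such a Protos.TaskInfo is commonly completed follows: scalar cpu and memory resources plus a CommandInfo that downloads the slave jar. The resource amounts and the command string are illustrative assumptions (Resource, Value, and CommandInfo are the org.apache.mesos.Protos types), not the values the truncated code actually uses.

    // Hedged completion sketch of a Protos.TaskInfo builder like the one
    // above. The cpu/mem amounts and the command string are illustrative.
    TaskInfo sketch = TaskInfo
        .newBuilder()
        .setName("task " + taskId.getValue())
        .setTaskId(taskId)
        .setSlaveId(offer.getSlaveId())
        .addResources(Resource.newBuilder()
            .setName("cpus")
            .setType(Value.Type.SCALAR)
            .setScalar(Value.Scalar.newBuilder().setValue(0.2)))
        .addResources(Resource.newBuilder()
            .setName("mem")
            .setType(Value.Type.SCALAR)
            .setScalar(Value.Scalar.newBuilder().setValue(512)))
        .setCommand(CommandInfo.newBuilder()
            .setValue("java -jar slave.jar")  // illustrative command
            .addUris(CommandInfo.URI.newBuilder()
                .setValue(joinPaths(jenkinsMaster, SLAVE_JAR_URI_SUFFIX))))
        .build();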

Examples of org.apache.mesos.Protos.TaskInfo

          .addUris(URI.newBuilder().setValue(EXECUTOR_PATH).setExecutable(true)))
      .build();

  @Test
  public void testExecutorInfoUnchanged() {
    TaskInfo task = taskFactory.createFrom(TASK, SLAVE);
    assertEquals(DEFAULT_EXECUTOR, task.getExecutor());
    assertEquals(ImmutableSet.of(
            Resources.makeMesosResource(Resources.CPUS, TASK.getTask().getNumCpus()),
            Resources.makeMesosResource(Resources.RAM_MB, TASK.getTask().getRamMb()),
            Resources.makeMesosResource(Resources.DISK_MB, TASK.getTask().getDiskMb()),
            Resources.makeMesosRangeResource(
                Resources.PORTS,
                ImmutableSet.copyOf(TASK.getAssignedPorts().values()))
        ),
        ImmutableSet.copyOf(task.getResourcesList()));
  }
View Full Code Here
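
The assertion above compares the full resource set at once. A small hedged helper for inspecting individual resources on a Protos.TaskInfo is sketched below; it uses only the generated protobuf accessors (getResourcesList, getName, hasScalar, getScalar), and the helper name is illustrative.

  // Hedged sketch: sum the scalar amount of one named resource ("cpus",
  // "mem", "disk") attached to a Protos.TaskInfo.
  private static double scalarResource(TaskInfo task, String name) {
    double total = 0;
    for (Resource resource : task.getResourcesList()) {
      if (name.equals(resource.getName()) && resource.hasScalar()) {
        total += resource.getScalar().getValue();
      }
    }
    return total;
  }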

Examples of org.apache.mesos.Protos.TaskInfo

  @Test
  public void testCreateFromPortsUnset() {
    AssignedTask assignedTask = TASK.newBuilder();
    assignedTask.unsetAssignedPorts();
    TaskInfo task = taskFactory.createFrom(IAssignedTask.build(assignedTask), SLAVE);
    assertEquals(DEFAULT_EXECUTOR, task.getExecutor());
    assertEquals(ImmutableSet.of(
            Resources.makeMesosResource(Resources.CPUS, TASK.getTask().getNumCpus()),
            Resources.makeMesosResource(Resources.RAM_MB, TASK.getTask().getRamMb()),
            Resources.makeMesosResource(Resources.DISK_MB, TASK.getTask().getDiskMb())
        ),
        ImmutableSet.copyOf(task.getResourcesList()));
  }
View Full Code Here

Examples of org.apache.mesos.Protos.TaskInfo

  private void expectAdjustRetainedTasks(IScheduledTask... tasks) {
    Map<String, ScheduleStatus> statuses =
        Maps.transformValues(Tasks.mapById(ImmutableSet.copyOf(tasks)), Tasks.GET_STATUS);
    AdjustRetainedTasks message = new AdjustRetainedTasks().setRetainedTasks(statuses);
    TaskInfo task = GcExecutorLauncher.makeGcTask(
        HOST, OFFER.getSlaveId(), SETTINGS.getGcExecutorPath().get(), TASK_UUID, message);
    driver.launchTask(OFFER.getId(), task);
  }
View Full Code Here