Package org.apache.hadoop.mapreduce.test.system

Examples of org.apache.hadoop.mapreduce.test.system.JobInfo
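The examples below share one idiom: fetch a JobInfo snapshot through the JobTracker client proxy and re-fetch it in a sleep loop until the job reaches the desired state. A minimal sketch of that idiom, assuming a JTProtocol proxy such as the one returned by JTClient.getProxy() in the snippets and a job id supplied by the surrounding system test (the helper name waitForJobCompletion is illustrative, not taken from the examples):

    private void waitForJobCompletion(JTProtocol proxy, JobID jobId)
        throws Exception {
      JobInfo info = proxy.getJobInfo(jobId);
      // JobInfo is a snapshot, so keep re-fetching it while polling.
      while (info != null && !info.getStatus().isJobComplete()) {
        Thread.sleep(1000);
        info = proxy.getJobInfo(jobId);
      }
    }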


    Assert.assertTrue(MAX_MAP_TASK_ATTEMPTS > 0);

    TTClient tClient = null;
    TTClient[] ttClients = null;

    JobInfo jInfo = remoteJTClient.getJobInfo(rJob.getID());

    // Fail fast if the job info could not be fetched.
    Assert.assertNotNull("JobInfo is null", jInfo);
    Assert.assertNotNull(jInfo.getStatus().getRunState());

    // Wait for the job to start running.
    while (jInfo.getStatus().getRunState() != JobStatus.RUNNING) {
      try {
        Thread.sleep(10000);
      } catch (InterruptedException e) {
        // Ignore the interrupt and poll the job state again.
      }
      jInfo = remoteJTClient.getJobInfo(rJob.getID());
    }

    // Temporarily store the job id to use it later for comparison.
    JobID jobidStore = rJob.getID();
    jobidStore = JobID.downgrade(jobidStore);
    LOG.info("job id is :" + jobidStore.toString());

    TaskInfo[] taskInfos = null;

    // After making sure that the job is running,
    // the test execution has to make sure that
    // at least one task has started running before continuing.
    boolean runningCount = false;
    int count = 0;
    do {
      taskInfos = cluster.getJTClient().getProxy().getTaskInfo(rJob.getID());
      runningCount = false;
      for (TaskInfo taskInfo : taskInfos) {
        TaskStatus[] taskStatuses = taskInfo.getTaskStatus();
        if (taskStatuses.length > 0) {
          LOG.info("taskStatuses[0].getRunState() is :"
              + taskStatuses[0].getRunState());
          if (taskStatuses[0].getRunState() == TaskStatus.State.RUNNING) {
            runningCount = true;
            break;
          } else {
            LOG.info("Sleeping 5 seconds");
            Thread.sleep(5000);
          }
        }
      }
      count++;
      // Bail out after a bounded number of polls to avoid an infinite loop
      // under unforeseen circumstances; the test case would fail later anyway.
      if (count > 10) {
        Assert.fail("Since the sleep count has reached beyond a point, "
            + "failing at this point");
      }
    } while (!runningCount);

    // This block picks one task, records its id, and kills that task
    // MAX_MAP_TASK_ATTEMPTS times, once for each of its re-attempts.
    String taskIdKilled = null;
    for (int i = 0; i < MAX_MAP_TASK_ATTEMPTS; i++) {
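      // Refresh the task reports at the start of every kill iteration so the
      // task's next attempt is visible.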
      taskInfos = cluster.getJTClient().getProxy().getTaskInfo(rJob.getID());

      for (TaskInfo taskInfo : taskInfos) {
        TaskAttemptID taskAttemptID;
        if (!taskInfo.isSetupOrCleanup()) {
          // This is the task that is going to be killed continuously in
          // all its task attempts. The first such task is picked up.
          TaskID taskid = TaskID.downgrade(taskInfo.getTaskID());
          LOG.info("taskid is :" + taskid);
          if (i == 0) {
            taskIdKilled = taskid.toString();
            taskAttemptID = new TaskAttemptID(taskid, i);
            LOG.info("taskAttemptid going to be killed is : " + taskAttemptID);
            (jobClient.new NetworkedJob(jInfo.getStatus())).killTask(
                taskAttemptID, true);
            checkTaskCompletionEvent(taskAttemptID, jInfo);
            break;
          } else {
            if (taskIdKilled.equals(taskid.toString())) {
              taskAttemptID = new TaskAttemptID(taskid, i);
              LOG.info("taskAttemptid going to be killed is : " + taskAttemptID);
              (jobClient.new NetworkedJob(jInfo.getStatus())).killTask(
                  taskAttemptID, true);
              checkTaskCompletionEvent(taskAttemptID, jInfo);
              break;
            }
          }
        }
      }
    }
    // Making sure that the job is complete.
    while (jInfo != null && !jInfo.getStatus().isJobComplete()) {
      Thread.sleep(10000);
      jInfo = remoteJTClient.getJobInfo(rJob.getID());
    }

    // Make sure the correct job status is obtained from all the jobs.


            org.apache.hadoop.mapred.JobID.downgrade(slpJob.getJobID()));
    taskController = conf.get(TTConfig.TT_TASK_CONTROLLER);
    // Get the job info so we can read the env variables from the daemon.
    // Then wait for the task to reach the running state; only then are the
    // task directories created.
    JobInfo info = wovenClient.getJobInfo(rJob.getID());
    Assert.assertNotNull("JobInfo is null", info);
    JobID id = rJob.getID();
    while (info.runningMaps() != 1) {
      Thread.sleep(1000);
      info = wovenClient.getJobInfo(id);
    }
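    // Walk the task reports, skipping the setup/cleanup tasks, and wait for
    // the real task attempt to reach the RUNNING state on its tracker.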
    TaskInfo[] myTaskInfos = wovenClient.getTaskInfo(id);
    for (TaskInfo tInfo : myTaskInfos) {
      if (!tInfo.isSetupOrCleanup()) {
        String[] taskTrackers = tInfo.getTaskTrackers();
        for (String taskTracker : taskTrackers) {
          TTInfo ttInfo = wovenClient.getTTInfo(taskTracker);
          TTClient ttCli = cluster.getTTClient(ttInfo.getStatus().getHost());
          Assert.assertNotNull("TTClient instance is null", ttCli);
          TTTaskInfo ttTaskInfo = ttCli.getProxy().getTask(tInfo.getTaskID());
          Assert.assertNotNull("TTTaskInfo is null", ttTaskInfo);
          while (ttTaskInfo.getTaskStatus().getRunState() != TaskStatus.State.RUNNING) {
            Thread.sleep(100);
            ttTaskInfo = ttCli.getProxy().getTask(tInfo.getTaskID());
          }
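          // The task is running on this tracker, so its local directories
          // should exist; check their permissions under the configured task
          // controller.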
          testPermissionWithTaskController(ttCli, conf, info);
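          // Broadcast a FinishTaskControlAction so the tracker that runs this
          // task allows it to finish.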
          FinishTaskControlAction action =
              new FinishTaskControlAction(TaskID.downgrade(tInfo.getTaskID()));
          for (TTClient cli : cluster.getTTClients()) {
            cli.getProxy().sendAction(action);
          }
        }
      }
    }
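    // Finally, poll the JobTracker until the job reports completion.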
    JobInfo jInfo = cluster.getJTClient().getProxy().getJobInfo(id);
    while (!jInfo.getStatus().isJobComplete()) {
      Thread.sleep(100);
      jInfo = cluster.getJTClient().getProxy().getJobInfo(id);
    }
  }