Package org.apache.hadoop.mapreduce.test.system

Examples of org.apache.hadoop.mapreduce.test.system.JobInfo
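The snippets below come from Hadoop's Herriot system tests. JobInfo is the system-test view of a submitted job: tests fetch it from the JobTracker proxy via getJobInfo(id), inspect getStatus().getRunState(), and re-fetch it in a polling loop until the job reaches the expected state. Below is a minimal sketch of that recurring loop written as a helper; the helper name, the JTProtocol proxy type, and the one-minute bound are assumptions layered on the calls the snippets themselves use.

    // Minimal sketch of the recurring "wait until the job runs" loop.
    // The helper name is hypothetical; getJobInfo, JobStatus.RUNNING and
    // UtilsForTests.waitFor are used exactly as in the snippets below.
    private JobInfo waitUntilJobRuns(JTProtocol remoteJTClient, JobID id)
        throws IOException {
      JobInfo jInfo = remoteJTClient.getJobInfo(id);
      int counter = 0;
      while (counter < 60
          && jInfo.getStatus().getRunState() != JobStatus.RUNNING) {
        UtilsForTests.waitFor(1000);   // poll once per second
        jInfo = remoteJTClient.getJobInfo(id);
        counter++;
      }
      Assert.assertTrue("Job has not started within 1 min.",
          jInfo.getStatus().getRunState() == JobStatus.RUNNING);
      return jInfo;
    }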


    createInput(inputDir, conf);
    FileInputFormat.setInputPaths(jconf, inputDir);
    FileOutputFormat.setOutputPath(jconf, outputDir);
    RunningJob runJob = jobClient.submitJob(jconf);
    JobID id = runJob.getID();
    JobInfo jInfo = remoteJTClient.getJobInfo(id);
    int counter = 0;
    while (counter < 60) {
      if (jInfo.getStatus().getRunState() == JobStatus.RUNNING) {
        break;
      } else {
        UtilsForTests.waitFor(1000);
        jInfo = remoteJTClient.getJobInfo(id);
      }
      counter++;
    }
    Assert.assertTrue("Job has not been started for 1 min.", counter != 60);

    JobStatus[] jobStatus = jobClient.getAllJobs();
    String userName = jobStatus[0].getUsername();
    TaskInfo[] taskInfos = remoteJTClient.getTaskInfo(id);
    // Pick the first real (non-setup/cleanup) task; taskInfo here is a
    // field that is reused after this loop.
    for (TaskInfo taskinfo : taskInfos) {
      if (!taskinfo.isSetupOrCleanup()) {
        taskInfo = taskinfo;
        break;
      }
    }

    counter = 0;
    while (counter < 30) {
      if (taskInfo.getTaskStatus().length > 0) {
        if (taskInfo.getTaskStatus()[0].getRunState() == TaskStatus.State.RUNNING) {
          break;
        }
      }
      UtilsForTests.waitFor(1000);
      taskInfo = remoteJTClient.getTaskInfo(taskInfo.getTaskID());
      counter++;
    }
    Assert.assertTrue("Task has not been started for 30 sec.", counter != 30);

    tID = TaskID.downgrade(taskInfo.getTaskID());
    FinishTaskControlAction action = new FinishTaskControlAction(tID);

    String[] taskTrackers = taskInfo.getTaskTrackers();
    counter = 0;
    while (counter < 30) {
      if (taskTrackers.length != 0) {
        break;
      }
      UtilsForTests.waitFor(100);
      taskTrackers = taskInfo.getTaskTrackers();
      counter++;
    }

    // Tracker names have the form "tracker_<host>:<rpc address>"; extract the host.
    String hostName = taskTrackers[0].split("_")[1];
    hostName = hostName.split(":")[0];
    ttClient = cluster.getTTClient(hostName);
    ttClient.getProxy().sendAction(action);
    String localDirs[] = ttClient.getMapredLocalDirs();
    TaskAttemptID taskAttID = new TaskAttemptID(tID, 0);
    for (String localDir : localDirs) {
      localTaskDir = localDir + "/"
          + TaskTracker.getLocalTaskDir(userName, id.toString(),
              taskAttID.toString());
      filesStatus = ttClient.listStatus(localTaskDir, true);
      if (filesStatus.length > 0) {
        isTempFolderExists = true;
        NetworkedJob networkJob =
            new JobClient.NetworkedJob(jInfo.getStatus(), jobClient.cluster);
        networkJob.killTask(taskAttID, false);
        break;
      }
    }
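
The FinishTaskControlAction used above is the Herriot mechanism for holding a task alive while the test inspects its local directories. A condensed sketch of the handshake, using only calls that appear on this page; the ordering is the essential part:

    // 1. Before submitting, mark the job so its tasks wait for a signal.
    FinishTaskControlAction.configureControlActionForJob(conf);
    // 2. Submit the job, wait until a non-setup/cleanup task is RUNNING,
    //    and verify whatever needs verifying while the task is held.
    // 3. Release the held task on its tracker.
    FinishTaskControlAction action =
        new FinishTaskControlAction(TaskID.downgrade(taskInfo.getTaskID()));
    ttClient.getProxy().sendAction(action);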

    Assert.assertTrue(MAX_MAP_TASK_ATTEMPTS > 0);

    TTClient tClient = null;
    TTClient[] ttClients = null;

    JobInfo jInfo = remoteJTClient.getJobInfo(rJob.getID());

    // Assert that jobInfo is not null before inspecting its run state.
    Assert.assertNotNull("jobInfo is null", jInfo);

    // Wait for the job to start running.
    while (jInfo.getStatus().getRunState() != JobStatus.RUNNING) {
      try {
        Thread.sleep(10000);
      } catch (InterruptedException e) {
        // ignore the interrupt and keep polling
      }
      jInfo = remoteJTClient.getJobInfo(rJob.getID());
    }

    // Temporarily store the jobid to use it later for comparison.
    JobID jobidStore = rJob.getID();
    jobidStore = JobID.downgrade(jobidStore);
    LOG.info("job id is :" + jobidStore.toString());

    TaskInfo[] taskInfos = null;

    // After making sure that the job is running,
    // the test execution has to make sure that
    // at least one task has started running before continuing.
    boolean runningCount = false;
    int count = 0;
    do {
      taskInfos = cluster.getJTClient().getProxy().getTaskInfo(rJob.getID());
      runningCount = false;
      for (TaskInfo taskInfo : taskInfos) {
        TaskStatus[] taskStatuses = taskInfo.getTaskStatus();
        if (taskStatuses.length > 0) {
          LOG.info("taskStatuses[0].getRunState() is :"
              + taskStatuses[0].getRunState());
          if (taskStatuses[0].getRunState() == TaskStatus.State.RUNNING) {
            runningCount = true;
            break;
          } else {
            LOG.info("Sleeping 5 seconds");
            Thread.sleep(5000);
          }
        }
      }
      count++;
      // If the count goes beyond a point, break out. This avoids an
      // infinite loop under unforeseen circumstances; the test case will
      // anyway fail later.
      if (count > 10) {
        Assert.fail("Sleep count has gone beyond its limit; "
            + "failing at this point");
      }
    } while (!runningCount);

    // This block picks one task and kills it MAX_MAP_TASK_ATTEMPTS times,
    // once for every attempt it makes to re-run.
    String taskIdKilled = null;
    for (int i = 0; i < MAX_MAP_TASK_ATTEMPTS; i++) {
      taskInfos = cluster.getJTClient().getProxy().getTaskInfo(rJob.getID());

      for (TaskInfo taskInfo : taskInfos) {
        TaskAttemptID taskAttemptID;
        if (!taskInfo.isSetupOrCleanup()) {
          // This is the task that is going to be killed continuously in
          // all its task attempts. The first such task gets picked up.
          TaskID taskid = TaskID.downgrade(taskInfo.getTaskID());
          LOG.info("taskid is :" + taskid);
          if (i == 0) {
            taskIdKilled = taskid.toString();
            taskAttemptID = new TaskAttemptID(taskid, i);
            LOG.info("taskAttemptid going to be killed is : " + taskAttemptID);
            (new JobClient.NetworkedJob(jInfo.getStatus(), jobClient.cluster))
                .killTask(taskAttemptID, true);
            checkTaskCompletionEvent(taskAttemptID, jInfo);
            break;
          } else {
            if (taskIdKilled.equals(taskid.toString())) {
              taskAttemptID = new TaskAttemptID(taskid, i);
              LOG.info("taskAttemptid going to be killed is : " + taskAttemptID);
              (new JobClient.NetworkedJob(jInfo.getStatus(), jobClient.cluster))
                  .killTask(taskAttemptID, true);
              checkTaskCompletionEvent(taskAttemptID, jInfo);
              break;
            }
          }
        }
      }
    }
    // Making sure that the job is complete.
    while (jInfo != null && !jInfo.getStatus().isJobComplete()) {
      Thread.sleep(10000);
      jInfo = remoteJTClient.getJobInfo(rJob.getID());
    }

    // Make sure that the correct job status is retrieved for all the jobs
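
checkTaskCompletionEvent above is a helper that this page does not show. A plausible sketch follows, assuming it polls the running job's completion events until a KILLED record appears for the given attempt; getTaskCompletionEvents and TaskCompletionEvent are stock org.apache.hadoop.mapred APIs, while the retry bound and the use of an rJob field are assumptions:

    // Plausible sketch only: assumes rJob (the RunningJob) is a field, as in
    // the snippet above; jInfo is kept to match the call site.
    private void checkTaskCompletionEvent(TaskAttemptID attemptId, JobInfo jInfo)
        throws IOException {
      boolean matched = false;
      for (int poll = 0; poll < 10 && !matched; poll++) {
        // Fetch every completion event reported so far for the job.
        TaskCompletionEvent[] events = rJob.getTaskCompletionEvents(0);
        for (TaskCompletionEvent event : events) {
          if (event.getTaskAttemptId().equals(attemptId)
              && event.getTaskStatus() == TaskCompletionEvent.Status.KILLED) {
            matched = true;
            break;
          }
        }
        if (!matched) {
          UtilsForTests.waitFor(1000);
        }
      }
      Assert.assertTrue("No KILLED completion event for " + attemptId, matched);
    }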

      UtilsForTests.waitFor(100);
      JobClient jobClient = jtClient.getClient();
      JobID jobId = jobClient.getAllJobs()[0].getJobID();
      LOG.info("JobId:" + jobId);
      if (jobId != null) {
        JobInfo jInfo = remoteJTClient.getJobInfo(jobId);
        Assert.assertEquals("Job has not succeeded",
            JobStatus.SUCCEEDED, jInfo.getStatus().getRunState());
      }
    } else {
      Assert.fail("Linux task controller not found.");
    }
  }

    jobConf.setMemoryForReduceTask(2048);
    int exitCode = ToolRunner.run(jobConf, job, jobArgs);
    Assert.assertEquals("Exit Code:", 0, exitCode);
    UtilsForTests.waitFor(1000);
    JobID jobId = jobClient.getAllJobs()[0].getJobID();
    JobInfo jInfo = remoteJTClient.getJobInfo(jobId);
    Assert.assertEquals(assertMessage,
        JobStatus.SUCCEEDED, jInfo.getStatus().getRunState());
    return jobId;
  }

    // Configure the job so its tasks wait for an explicit finish signal,
    // keeping them alive until all verification is done.
    FinishTaskControlAction.configureControlActionForJob(conf);
    // Submit the job
    RunningJob rJob = cluster.getJTClient().getClient().submitJob(jconf);
    JobID jobId = rJob.getID();
    JobInfo jInfo = remoteJTClient.getJobInfo(jobId);
    LOG.info("jInfo is :" + jInfo);
    boolean jobStarted = cluster.getJTClient().isJobStarted(jobId);
    Assert.assertTrue("Job has not started even after a minute",
        jobStarted );
     

    rJob.submit();
    RunningJob rJob1 =
        client.getJob(org.apache.hadoop.mapred.JobID.downgrade(rJob.getJobID()));
    JobID id = rJob.getJobID();

    JobInfo jInfo = wovenClient.getJobInfo(id);

    while (jInfo.getStatus().getRunState() != JobStatus.RUNNING) {
      Thread.sleep(1000);
      jInfo = wovenClient.getJobInfo(id);
    }

    LOG.info("Waiting till job starts running one map");

    }
    jtClient.getClient().killJob(jobId);
    LOG.info("Waiting till the job is completed...");
    Assert.assertTrue("Job has not been completed for 1 min",
        jtClient.isJobStopped(jobId));
    JobInfo jobInfo = rtClient.getJobInfo(jobId);
    Assert.assertEquals("Job has not been killed",
            jobInfo.getStatus().getRunState(), JobStatus.KILLED);
    UtilsForTests.waitFor(3000);
    Assert.assertTrue("Job directories have not been cleaned up properly " +
        "after completion of job", verifyJobDirectoryCleanup(map));
  }
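
verifyJobDirectoryCleanup is likewise not shown on this page. A plausible sketch, assuming the map records each tracker's mapred-local task directories while the job ran; TTClient.listStatus appears in the first example, but the map layout and the helper's body are assumptions:

    // Plausible sketch only: returns true once no recorded task directory
    // still has contents on its tracker.
    private boolean verifyJobDirectoryCleanup(Map<TTClient, List<String>> map)
        throws IOException {
      for (Map.Entry<TTClient, List<String>> entry : map.entrySet()) {
        for (String dir : entry.getValue()) {
          try {
            FileStatus[] status = entry.getKey().listStatus(dir, true);
            if (status != null && status.length > 0) {
              return false; // a task directory is still populated
            }
          } catch (Exception e) {
            // A path that no longer exists counts as cleaned up.
          }
        }
      }
      return true;
    }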

    job.setConf(conf);
    JobConf jobConf = job.setupJobConf(1, 0, 10000,0, 10, 10);
    JobClient client = jtClient.getClient();
    RunningJob runJob = client.submitJob(jobConf);
    JobID jobId = runJob.getID();
    JobInfo jobInfo = rtClient.getJobInfo(jobId);
    Assert.assertTrue("Job has not been started for 1 min",
        jtClient.isJobStarted(jobId));
    TaskInfo[] taskInfos = rtClient.getTaskInfo(jobId);
    boolean isFailTask = false;
    for (TaskInfo taskinfo : taskInfos) {
      if (!taskinfo.isSetupOrCleanup()) {       
        Assert.assertTrue("Task has not been started for 1 min ",
            jtClient.isTaskStarted(taskinfo));
        String tasktracker = getTaskTracker(taskinfo);
        Assert.assertNotNull("TaskTracker has not been found", tasktracker);
        TTClient ttClient = getTTClient(tasktracker);
        map.put(ttClient, getTTClientMapRedLocalDirs(ttClient,
            taskinfo, jobId));
        if (!isFailTask) {
          Assert.assertNotNull("TaskInfo is null.", taskinfo);
          TaskID taskId = TaskID.downgrade(taskinfo.getTaskID());
          TaskAttemptID taskAttID = new TaskAttemptID(taskId,
              taskinfo.numFailedAttempts());
          int maxMapTaskAttempts = Integer.parseInt(
              jobConf.get("mapred.map.max.attempts"));
          while (taskinfo.numFailedAttempts() < maxMapTaskAttempts) {
            NetworkedJob networkJob = jtClient.getClient().
               new NetworkedJob(jobInfo.getStatus());
            networkJob.killTask(taskAttID, true);
            taskinfo = rtClient.getTaskInfo(taskinfo.getTaskID());
            taskAttID = new TaskAttemptID(taskId, taskinfo.numFailedAttempts());
            jobInfo = rtClient.getJobInfo(jobId);
          }
          isFailTask = true;
        }
      }
    }
    LOG.info("Waiting till the job is completed...");
    Assert.assertTrue("Job has not been completed for 1 min",
        jtClient.isJobStopped(jobId));
    jobInfo = rtClient.getJobInfo(jobId);
    Assert.assertEquals("Job has not been failed",
            jobInfo.getStatus().getRunState(), JobStatus.FAILED);
    UtilsForTests.waitFor(3000);
    Assert.assertTrue("Directories have not been cleaned up " +
        "after completion of job", verifyJobDirectoryCleanup(map));
  }

        getLastDaySucceededTasks();

    //Submitting the job
    RunningJob rJob = cluster.getJTClient().getClient().submitJob(jconf);

    JobInfo jInfo = remoteJTClient.getJobInfo(rJob.getID());
    LOG.info("jInfo is :" + jInfo);

    // Assert that jobInfo is not null
    Assert.assertNotNull("jobInfo is null", jInfo);

    count = 0;
    LOG.info("Waiting till the job is completed...");
    while (jInfo != null && !jInfo.getStatus().isJobComplete()) {
      UtilsForTests.waitFor(1000);
      count++;
      jInfo = remoteJTClient.getJobInfo(rJob.getID());
      // If the count goes beyond a point, break out. This avoids an
      // infinite loop under unforeseen circumstances. The test case will