Examples of JTProtocol
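
The excerpts below come from Hadoop system tests that drive the JobTracker through a JTProtocol proxy. As a rough orientation, the recurring pattern in these tests is to obtain the proxy from the JTClient, submit a SleepJob, and poll getJobInfo() until the job completes. The following is a minimal sketch of that pattern, assuming the same cluster/JTClient test fixture and conf that the excerpts use; it is not taken verbatim from any one of them.

  //Obtain the JTProtocol proxy from the test cluster's JTClient.
  JTProtocol proxy = cluster.getJTClient().getProxy();

  //Set up and submit a SleepJob, as the excerpts do.
  SleepJob job = new SleepJob();
  job.setConf(conf);
  conf = job.setupJobConf(5, 1, 100, 100, 100, 100);
  RunningJob rJob = cluster.getJTClient().getClient().submitJob(new JobConf(conf));
  JobID id = rJob.getID();

  //Poll the JobTracker through the proxy until the job completes,
  //bailing out after a bounded number of iterations.
  JobInfo jInfo = proxy.getJobInfo(id);
  int polls = 0;
  while (jInfo != null && !jInfo.getStatus().isJobComplete() && polls++ < 10) {
    UtilsForTests.waitFor(10000);
    jInfo = proxy.getJobInfo(id);
  }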


Examples of org.apache.hadoop.mapreduce.test.system.JTProtocol

   * @param none
   * @return void
   */
  @Test
  public void testRetiredJobsHistoryLocation() throws Exception {
    JTProtocol remoteJTClient = cluster.getJTClient().getProxy();
    int testIterationLoop = 0;

    do {
      SleepJob job = null;
      testIterationLoop++;
      job = new SleepJob();
      job.setConf(conf);
      conf = job.setupJobConf(5, 1, 100, 100, 100, 100);
      //Get the value of mapred.jobtracker.retirejob.check. If not
      //found then use 60000 milliseconds, which is the application default.
      retiredJobInterval =
        conf.getInt("mapred.jobtracker.retirejob.check", 60000);
      //Assert if retiredJobInterval is 0
      if ( retiredJobInterval == 0 ) {
        Assert.fail("mapred.jobtracker.retirejob.check is 0");
      }

      conf.setBoolean("mapreduce.job.complete.cancel.delegation.tokens", false);
      jobFileFound = false;

      JobConf jconf = new JobConf(conf);
      jobHistoryDonePathString = null;
      jobHistoryDonePathString = jconf.
          get("mapred.job.tracker.history.completed.location");
      //Assert if jobHistoryDonePathString is null
      Assert.assertNotNull("mapred.job.tracker.history.completed.location " +
          "is null", jobHistoryDonePathString);

      LOG.info("jobHistoryDonePath location is :" + jobHistoryDonePathString);

      FileStatus[] jobHistoryDoneFileStatuses = dfs.
          listStatus(new Path (jobHistoryDonePathString));
      String jobHistoryPathString = jconf.get("hadoop.job.history.location");

      //Submitting the job
      RunningJob rJob = cluster.getJTClient().getClient().submitJob(jconf);

      JobID jobID = rJob.getID();
      JobInfo jInfo = remoteJTClient.getJobInfo(jobID);
      String jobIDString = jobID.toString();
      LOG.info("jobIDString is :" + jobIDString);

      //Assert if jobInfo is null
      Assert.assertNotNull("jobInfo is null", jInfo);

      waitTillRunState(jInfo, jobID, remoteJTClient);

      if (jobHistoryPathString != null) {
        FileStatus[] jobHistoryFileStatuses = dfs.
          listStatus(new Path (jobHistoryPathString));
        jobFileFound = false;
        for (FileStatus jobHistoryFileStatus : jobHistoryFileStatuses) {
          if ((jobHistoryFileStatus.getPath().toString()).
              matches(jobIDString)) {
            jobFileFound = true;
            break;
          }
        }
        Assert.assertTrue("jobFileFound is false", jobFileFound);
      }

      TaskInfo[] taskInfos = cluster.getJTClient().getProxy()
          .getTaskInfo(rJob.getID());

      //Killing this job will happen only in the second iteration.
      if (testIterationLoop == 2) {
        //Killing the job because all the verification needed
        //for this testcase is completed.
        rJob.killJob();
      }

      //Making sure that the job is complete.
      count = 0;
      while (jInfo != null && !jInfo.getStatus().isJobComplete()) {
        UtilsForTests.waitFor(10000);
        count++;
        jInfo = remoteJTClient.getJobInfo(rJob.getID());
        //If the count goes beyond 100 seconds, then break; This is to avoid
        //infinite loop.
        if (count > 10) {
          Assert.fail("job has not reached running state for more than" +
              "100 seconds. Failing at this point");

Examples of org.apache.hadoop.mapreduce.test.system.JTProtocol

   * @return void
   */
  @Test
  public void testRetiredMultipleJobsHistoryLocation() throws Exception {
    Configuration conf = new Configuration(cluster.getConf());
    JTProtocol remoteJTClient = cluster.getJTClient().getProxy();
    int testIterationLoop = 0;
    FileStatus[] jobHistoryDoneFileStatuses;
    RunningJob[] rJobCollection = new RunningJob[4];
    JobID[] rJobIDCollection = new JobID[4];
    String jobHistoryDonePathString = null;
    JobInfo jInfo = null;
    for ( int noOfJobs = 0; noOfJobs < 4; noOfJobs++ ) {
      SleepJob job = null;
      testIterationLoop++;
      job = new SleepJob();
      job.setConf(conf);
      conf = job.setupJobConf(5, 1, 100, 100, 100, 100);
      conf.setBoolean("mapreduce.job.complete.cancel.delegation.tokens",
        false);
      JobConf jconf = new JobConf(conf);

      jobHistoryDonePathString = null;
      jobHistoryDonePathString = jconf.
          get("mapred.job.tracker.history.completed.location");
      //Assert if jobHistoryDonePathString is null
      Assert.assertNotNull("mapred.job.tracker.history.completed.location "
          + "is null", jobHistoryDonePathString);

      LOG.info("jobHistoryDonePath location is :" +
          jobHistoryDonePathString);

      //Submitting the job
      RunningJob rJob = cluster.getJTClient().getClient().submitJob(jconf);
      JobID jobID = rJob.getID();
    
      rJobCollection[noOfJobs] = rJob;
      rJobIDCollection[noOfJobs] = jobID;

      jInfo = remoteJTClient.getJobInfo(jobID);
      LOG.info("jobIDString is :" + jobID.toString());
      //Assert if jobInfo is null
      Assert.assertNotNull("jobInfo is null", jInfo);
    }

    //Wait for the jobs to start running.
    for (int noOfJobs = 0; noOfJobs < 4; noOfJobs++) {
      waitTillRunState(jInfo, rJobIDCollection[noOfJobs], remoteJTClient);
    }

    //Killing two jobs
    (rJobCollection[0]).killJob();
    (rJobCollection[3]).killJob();

    //Making sure that the jobs are complete.
    for (int noOfJobs = 0; noOfJobs < 4; noOfJobs++) {
      count = 0;
      while (remoteJTClient.getJobInfo(rJobIDCollection[noOfJobs]) != null &&
          !(remoteJTClient.getJobInfo(rJobIDCollection[noOfJobs])).
          getStatus().isJobComplete()) {
        UtilsForTests.waitFor(10000);
        count++;
        //If the count goes beyond 100 seconds, then break; This is to avoid
        //infinite loop.
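
Both retired-history tests above call a waitTillRunState helper that falls outside these excerpts. A hypothetical sketch of such a helper, modeled on the explicit run-state polling loops the other excerpts use, could look like this:

  //Hypothetical helper (not part of the excerpts): poll the JobTracker
  //through the JTProtocol proxy until the job reaches the RUNNING state,
  //failing after roughly 100 seconds to avoid an infinite loop.
  private void waitTillRunState(JobInfo jInfo, JobID jobID,
      JTProtocol remoteJTClient) throws Exception {
    int count = 0;
    while (jInfo != null
        && jInfo.getStatus().getRunState() != JobStatus.RUNNING) {
      UtilsForTests.waitFor(10000);
      count++;
      jInfo = remoteJTClient.getJobInfo(jobID);
      if (count > 10) {
        Assert.fail("Job " + jobID + " has not reached running state for " +
            "more than 100 seconds. Failing at this point");
      }
    }
  }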

Examples of org.apache.hadoop.mapreduce.test.system.JTProtocol

   * @param none
   * @return void
   */
  public void testDistributedCache() throws Exception {
    Configuration conf = new Configuration(cluster.getConf());
    JTProtocol wovenClient = cluster.getJTClient().getProxy();

    //This counter will check for count of a loop,
    //which might become infinite.
    int count = 0;
    //This boolean will decide whether to run job again
    boolean continueLoop = true;
    //counter for job Loop
    int countLoop = 0;
    //This counter increases with all the tasktrackers in which tasks ran
    int taskTrackerCounter = 0;
    //This will store all the tasktrackers in which tasks ran
    ArrayList<String> taskTrackerCollection = new ArrayList<String>();
    //This boolean tells if two tasks ran on the same tasktracker or not
    boolean taskTrackerFound = false;

    do {
      SleepJob job = new SleepJob();
      job.setConf(conf);
      conf = job.setupJobConf(5, 1, 1000, 1000, 100, 100);
      conf.setBoolean("mapreduce.job.complete.cancel.delegation.tokens", false);

      //Before starting, Modify the file
      String input = "This will be the content of\n" + "distributed cache\n";
      //Creating the path with the file
      DataOutputStream file =
          UtilsForTests.createTmpFileDFS(dfs, URIPATH, permission, input);

      DistributedCache.createSymlink(conf);
      URI uri = URI.create(uriPath);
      DistributedCache.addCacheFile(uri, conf);
      JobConf jconf = new JobConf(conf);

      //Controls the job till all verification is done
      FinishTaskControlAction.configureControlActionForJob(conf);

      //Submitting the job
      RunningJob rJob = cluster.getJTClient().getClient().submitJob(jconf);

      //counter for job Loop
      countLoop++;

      TTClient tClient = null;
      JobInfo jInfo = wovenClient.getJobInfo(rJob.getID());
      LOG.info("jInfo is :" + jInfo);

      //Assert if jobInfo is null
      Assert.assertNotNull("jobInfo is null", jInfo);

      //Wait for the job to start running.
      count = 0;
      while (jInfo.getStatus().getRunState() != JobStatus.RUNNING) {
        UtilsForTests.waitFor(10000);
        count++;
        jInfo = wovenClient.getJobInfo(rJob.getID());
        //If the count goes beyond a point, then break; This is to avoid
        //infinite loop under unforeseen circumstances. Testcase will anyway
        //fail later.
        if (count > 10) {
          Assert.fail("job has not reached running state for more than" +
            "100 seconds. Failing at this point");
        }
      }

      LOG.info("job id is :" + rJob.getID().toString());

      TaskInfo[] taskInfos = cluster.getJTClient().getProxy()
             .getTaskInfo(rJob.getID());

      boolean distCacheFileIsFound;

      for (TaskInfo taskInfo : taskInfos) {
        distCacheFileIsFound = false;
        String[] taskTrackers = taskInfo.getTaskTrackers();
        for (String taskTracker : taskTrackers) {
          //Formatting tasktracker to get just its FQDN
          taskTracker = UtilsForTests.getFQDNofTT(taskTracker);
          LOG.info("taskTracker is :" + taskTracker);

          //The tasktrackerFound variable is initialized
          taskTrackerFound = false;

          //This will be entered from the second job onwards
          if (countLoop > 1) {
            if (taskTracker != null) {
              continueLoop = taskTrackerCollection.contains(taskTracker);
            }
            if (continueLoop) {
              taskTrackerFound = true;
            }
          }
          //Collecting the tasktrackers
          if (taskTracker != null)
            taskTrackerCollection.add(taskTracker);

          //We have looped through twice to look for tasks
          //getting submitted on the same tasktrackers. The same tasktracker
          //was not hit for subsequent jobs, maybe because there are a large
          //number of tasktrackers. So, the testcase has to stop here.
          if (countLoop > 1) {
            continueLoop = false;
          }

          tClient = cluster.getTTClient(taskTracker);

          //tClient maybe null because the task is already dead. Ex: setup
          if (tClient == null) {
            continue;
          }

          String[] localDirs = tClient.getMapredLocalDirs();
          int distributedFileCount = 0;
          //Go to every single path
          for (String localDir : localDirs) {
            //Public distributed cache will always be stored under
            //mapred.local.dir/tasktracker/archive
            localDir = localDir + Path.SEPARATOR +
                   TaskTracker.getPublicDistributedCacheDir();
            LOG.info("localDir is : " + localDir);

            //Get file status of all the directories
            //and files under that path.
            FileStatus[] fileStatuses = tClient.listStatus(localDir,
                true, true);
            for (FileStatus  fileStatus : fileStatuses) {
              Path path = fileStatus.getPath();
              LOG.info("path is :" + path.toString());
              //Checking if the received path ends with
              //the distributed filename
              distCacheFileIsFound = (path.toString()).
                  endsWith(distributedFileName);
              //If file is found, check for its permission.
              //Since the file is found break out of loop
              if (distCacheFileIsFound){
                LOG.info("PATH found is :" + path.toString());
                distributedFileCount++;
                String filename = path.getName();
                FsPermission fsPerm = fileStatus.getPermission();
                Assert.assertTrue("File Permission is not 777",
                    fsPerm.equals(new FsPermission("777")));
              }
            }
          }

          LOG.debug("The distributed FileCount is :" + distributedFileCount);
          LOG.debug("The taskTrackerFound is :" + taskTrackerFound);

          // If distributed cache is modified in dfs
          // between two job runs, it can be present more than once
          // in any of the task tracker, in which job ran.
          if (distributedFileCount != 2 && taskTrackerFound) {
            Assert.fail("The distributed cache file has to be two. " +
                "But found was " + distributedFileCount);
          } else if (distributedFileCount < 1)
            Assert.fail("The distributed cache file is less than one. " +
                "But found was " + distributedFileCount);
          if (!distCacheFileIsFound) {
            Assert.assertEquals("The distributed cache file does not exist",
                distCacheFileIsFound, false);
          }
        }
      }
      //Allow the job to continue through MR control job.
      cluster.signalAllTasks(rJob.getID());

      //Killing the job because all the verification needed
      //for this testcase is completed.
      rJob.killJob();

      //Waiting for 3 seconds for cleanup to start
      Thread.sleep(3000);

      //Getting the last cleanup task's tasktracker also, as
      //distributed cache gets uploaded even during cleanup.
      TaskInfo[] myTaskInfos = wovenClient.getTaskInfo(rJob.getID());
      if (myTaskInfos != null) {
        for(TaskInfo info : myTaskInfos) {
          if(info.isSetupOrCleanup()) {
            String[] taskTrackers = info.getTaskTrackers();
            for(String taskTracker : taskTrackers) {
              //Formatting tasktracker to get just its FQDN
              taskTracker = UtilsForTests.getFQDNofTT(taskTracker);
              LOG.info("taskTracker is :" + taskTracker);
              //Collecting the tasktrackers
              if (taskTracker != null)
                taskTrackerCollection.add(taskTracker);
            }   
          }
        }
      }

      //Making sure that the job is complete.
      while (jInfo != null && !jInfo.getStatus().isJobComplete()) {
        Thread.sleep(10000);
        jInfo = wovenClient.getJobInfo(rJob.getID());
      }

    } while (continueLoop);
  }
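
The test above holds its tasks with FinishTaskControlAction.configureControlActionForJob(conf) and later releases them all at once through cluster.signalAllTasks(rJob.getID()). The testControlledJob excerpt further down releases tasks individually instead; a condensed sketch of that release step, assuming the same cluster fixture and wovenClient proxy, looks like this:

  //Condensed from the testControlledJob excerpt below: build a
  //FinishTaskControlAction for every task of the job and send it to
  //each tasktracker so the held tasks are allowed to finish.
  for (TaskInfo info : wovenClient.getTaskInfo(rJob.getID())) {
    FinishTaskControlAction action = new FinishTaskControlAction(
        TaskID.downgrade(info.getTaskID()));
    for (TTClient cli : cluster.getTTClients()) {
      cli.getProxy().sendAction(action);
    }
  }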

Examples of org.apache.hadoop.mapreduce.test.system.JTProtocol

    FileInputFormat.setInputPaths(jobConf, inputDir);
    FileOutputFormat.setOutputPath(jobConf, outputDir);

    JTClient jtClient = cluster.getJTClient();
    JobClient client = jtClient.getClient();
    JTProtocol wovenClient = cluster.getJTClient().getProxy();
    RunningJob runJob = client.submitJob(jobConf);
    JobID id = runJob.getID();
    JobInfo jInfo = wovenClient.getJobInfo(id);
    Assert.assertNotNull("Job information is null",jInfo);

    Assert.assertTrue("Job has not been started for 1 min.",
  jtClient.isJobStarted(id));
    JobStatus[] jobStatus = client.getAllJobs();
    String userName = jobStatus[0].getUsername();

    TaskInfo[] taskInfos = wovenClient.getTaskInfo(id);
    for (TaskInfo taskinfo : taskInfos) {
      if (!taskinfo.isSetupOrCleanup()) {
        taskInfo = taskinfo;
        break;
      }

Examples of org.apache.hadoop.mapreduce.test.system.JTProtocol

    FileInputFormat.setInputPaths(jobConf, inputDir);
    FileOutputFormat.setOutputPath(jobConf, outputDir);

    JTClient jtClient = cluster.getJTClient();
    JobClient client = jtClient.getClient();
    JTProtocol wovenClient = cluster.getJTClient().getProxy();
    RunningJob runJob = client.submitJob(jobConf);
    JobID id = runJob.getID();
    JobInfo jInfo = wovenClient.getJobInfo(id);
    Assert.assertNotNull("Job information is null",jInfo);

    Assert.assertTrue("Job has not been started for 1 min.",
        jtClient.isJobStarted(id));

    JobStatus[] jobStatus = client.getAllJobs();
    String userName = jobStatus[0].getUsername();

    TaskInfo[] taskInfos = wovenClient.getTaskInfo(id);
    for (TaskInfo taskinfo : taskInfos) {
      if (!taskinfo.isSetupOrCleanup()) {
        taskInfo = taskinfo;
        break;
      }

Examples of org.apache.hadoop.mapreduce.test.system.JTProtocol

   * @param none
   * @return void
   */
  public void testDistributedCache() throws Exception {
    Configuration conf = new Configuration(cluster.getConf());
    JTProtocol wovenClient = cluster.getJTClient().getProxy();

    String jobTrackerUserName = wovenClient.getDaemonUser();

    LOG.info("jobTrackerUserName is :" + jobTrackerUserName);

    //This counter will check for count of a loop,
    //which might become infinite.
    int count = 0;

    SleepJob job = new SleepJob();
    job.setConf(conf);
    conf = job.setupJobConf(5, 1, 1000, 1000, 100, 100);

    DistributedCache.createSymlink(conf);
    URI uri = URI.create(uriPath);
    DistributedCache.addCacheFile(uri, conf);
    JobConf jconf = new JobConf(conf);

    //Controls the job till all verification is done
    FinishTaskControlAction.configureControlActionForJob(conf);

    //Submitting the job
    RunningJob rJob = cluster.getJTClient().getClient().submitJob(jconf);

    JobStatus[] jobStatus = client.getAllJobs();
    String userName = jobStatus[0].getUsername();

    TTClient tClient = null;
    JobInfo jInfo = wovenClient.getJobInfo(rJob.getID());
    LOG.info("jInfo is :" + jInfo);

    //Assert if jobInfo is null
    Assert.assertNotNull("jobInfo is null", jInfo);

    //Wait for the job to start running.
    count = 0;
    while (jInfo.getStatus().getRunState() != JobStatus.RUNNING) {
      UtilsForTests.waitFor(10000);
      count++;
      jInfo = wovenClient.getJobInfo(rJob.getID());
      //If the count goes beyond a point, then Assert; This is to avoid
      //infinite loop under unforeseen circumstances.
      if (count > 10) {
        Assert.fail("job has not reached running state for more than" +
            "100 seconds. Failing at this point");

Examples of org.apache.hadoop.mapreduce.test.system.JTProtocol

    conf.setInt("test.randomwrite.bytes_per_map", RW_BYTES_PER_MAP);
    conf.setInt("test.randomwriter.maps_per_host", RW_MAPS_PER_HOST);
    String[] rwArgs = {inputDir.toString()};

    // JTProtocol remoteJTClient
    JTProtocol remoteJTClient = cluster.getJTClient().getProxy();

    // JobInfo jInfo;
    JobInfo jInfo = null;

    dfs.delete(inputDir, true);

    // Run RandomWriter
    Assert.assertEquals(ToolRunner.run(conf, new RandomWriter(), rwArgs),
        0);

    jobStatus = client.getAllJobs();
    JobID id = null;
    //Getting the jobId of the just submitted job
    id = jobStatus[0].getJobID();

    LOG.info("jobid is :" + id.toString());

    Assert.assertTrue("Failed to complete the job",
    cluster.getJTClient().isJobStopped(id));

    jInfo = remoteJTClient.getJobInfo(id);
    JobStatus jStatus = jInfo.getStatus();

    if (jStatus != null) {
      Assert.assertEquals("Job has not succeeded...",
        JobStatus.SUCCEEDED, jStatus.getRunState());

Examples of org.apache.hadoop.mapreduce.test.system.JTProtocol

   * At last the node is started.
   */
  @Test
  public void TestNodeDecommissioning() throws Exception {

    JTProtocol remoteJTClientProxy = cluster.getJTClient().getProxy();

    JTClient remoteJTClient = cluster.getJTClient();
    String jtClientHostName = remoteJTClient.getHostName();
    InetAddress localMachine = java.net.InetAddress.getLocalHost();
    String testRunningHostName = localMachine.getHostName();
    LOG.info("Hostname of local machine: " + testRunningHostName);

    List<TTClient> ttClients = cluster.getTTClients();

    //Get one slave (tasktracker)
    TTClient ttClient = (TTClient)ttClients.get(0);
    String ttClientHostName = ttClient.getHostName();

    //Get the Hadoop conf directory
    String hadoopConfDir = cluster.getConf().get(
        HadoopDaemonRemoteCluster.CONF_HADOOPCONFDIR);

    LOG.info("hadoopConfDir is:" + hadoopConfDir);

    //Get the Hadoop home directory
    String hadoopHomeDir = cluster.getConf().get(
        HadoopDaemonRemoteCluster.CONF_HADOOPHOME);

    LOG.info("hadoopHomeDir is:" + hadoopHomeDir);

    conf = cluster.getJTClient().getProxy().getDaemonConf();
    //Get the "mapred.hosts.exclude" path
    String excludeHostPathString = (String) conf.get("mapred.hosts.exclude");
    String keytabForHadoopqaUser =
        "/homes/hadoopqa/hadoopqa.dev.headless.keytab hadoopqa";
    excludeHostPath = new Path(excludeHostPathString);
    LOG.info("exclude Host pathString is :" + excludeHostPathString);

    //One sleep job is submitted
    SleepJob job = new SleepJob();
    job.setConf(conf);
    conf = job.setupJobConf(1, 0, 100, 100, 100, 100);
    JobConf jconf = new JobConf(conf);
    RunningJob rJob = cluster.getJTClient().getClient().submitJob(jconf);

    //Get the username that submitted the job.
    String userName = null;
    try {
      JobStatus[] jobStatus = cluster.getJTClient().getClient().getAllJobs();
      userName = jobStatus[0].getUsername();
    } catch(Exception ex) {
      LOG.error("Failed to get user name");
      boolean status = false;
      Assert.assertTrue("Failed to get the userName", status);
    }

    //The client which needs to be decommissioned is put in the exclude path.
    String command = "echo " + ttClientHostName + " > " + excludeHostPath;
    LOG.info("command is : " + command);
    RemoteExecution rExec = new SSHRemoteExecution();
    rExec.executeCommand(jtClientHostName, userName, command);

    //The refreshNodes command is created and executed on the JobTracker client.
    String refreshNodeCommand = "export HADOOP_CONF_DIR=" + hadoopConfDir +
        "; export HADOOP_HOME=" + hadoopHomeDir + ";cd " + hadoopHomeDir +
        ";kinit -k -t " + keytabForHadoopqaUser +
        ";bin/hadoop mradmin -refreshNodes;";
    LOG.info("refreshNodeCommand is : " + refreshNodeCommand);
    try {
      rExec.executeCommand(testRunningHostName, userName,
          refreshNodeCommand);
    } catch (Exception e) { e.printStackTrace();}

    //Check whether the node is really decommissioned.
    boolean nodeDecommissionedOrNot = false;
    nodeDecommissionedOrNot = remoteJTClientProxy.
        isNodeDecommissioned(ttClientHostName);

    //The TTClient host is removed from the exclude path
    command = "rm " + excludeHostPath;

    LOG.info("command is : " + command);
    rExec.executeCommand(jtClientHostName, userName, command);

    Assert.assertTrue("Node should be decommissioned", nodeDecommissionedOrNot);

    //The refreshNodes command is created and executed on the JobTracker client.
    rExec.executeCommand(jtClientHostName, userName,
        refreshNodeCommand);

    //Check whether the node is no longer decommissioned.
    nodeDecommissionedOrNot = false;
    nodeDecommissionedOrNot = remoteJTClientProxy.
        isNodeDecommissioned(ttClientHostName);
    Assert.assertFalse("present of not is", nodeDecommissionedOrNot);

    //Starting that node
    String ttClientStart = "export HADOOP_CONF_DIR=" + hadoopConfDir +

Examples of org.apache.hadoop.mapreduce.test.system.JTProtocol

  }
 
  @Test
  public void testControlledJob() throws Exception {
    Configuration conf = new Configuration(cluster.getConf());
    JTProtocol wovenClient = cluster.getJTClient().getProxy();
    FinishTaskControlAction.configureControlActionForJob(conf);
    SleepJob job = new SleepJob();
    job.setConf(conf);
   
    conf = job.setupJobConf(1, 0, 100, 100, 100, 100);
    JobClient client = cluster.getJTClient().getClient();
   
    RunningJob rJob = client.submitJob(new JobConf(conf));
    JobID id = rJob.getID();
   
    JobInfo jInfo = wovenClient.getJobInfo(id);
   
    while (jInfo.getStatus().getRunState() != JobStatus.RUNNING) {
      Thread.sleep(1000);
      jInfo = wovenClient.getJobInfo(id);
    }
   
    LOG.info("Waiting till job starts running one map");
    jInfo = wovenClient.getJobInfo(id);
    Assert.assertEquals(jInfo.runningMaps(), 1);
   
    LOG.info("waiting for another cycle to " +
        "check if the maps dont finish off");
    Thread.sleep(1000);
    jInfo = wovenClient.getJobInfo(id);
    Assert.assertEquals(jInfo.runningMaps(), 1);
   
    TaskInfo[] taskInfos = wovenClient.getTaskInfo(id);
   
    for(TaskInfo info : taskInfos) {
      LOG.info("constructing control action to signal task to finish");
      FinishTaskControlAction action = new FinishTaskControlAction(
          TaskID.downgrade(info.getTaskID()));
      for(TTClient cli : cluster.getTTClients()) {
        cli.getProxy().sendAction(action);
      }
    }
   
    jInfo = wovenClient.getJobInfo(id);
    int i = 1;
    if (jInfo != null) {
      while (!jInfo.getStatus().isJobComplete()) {
        Thread.sleep(1000);
        jInfo = wovenClient.getJobInfo(id);
        if (jInfo == null) {
          break;
        }
        if(i > 40) {
          Assert.fail("Controlled Job with ID : "

Examples of org.apache.hadoop.mapreduce.test.system.JTProtocol

        cluster.getJTClient().isJobStarted(jobId));
    TaskInfo taskInfo = getTaskInfo(jobId, true);
    Assert.assertNotNull("TaskInfo is null",taskInfo);
    Assert.assertTrue("Task has not been started for 1 min.",
        cluster.getJTClient().isTaskStarted(taskInfo));
    JTProtocol wovenClient = cluster.getJTClient().getProxy();
    int counter = 0;
    TaskInfo tempTaskInfo;
    while (counter++ < 60) {
      if (taskInfo.getTaskStatus().length == 0) {
        UtilsForTests.waitFor(1000);
        tempTaskInfo = taskInfo;
        taskInfo = wovenClient.getTaskInfo(taskInfo.getTaskID());
      }else if (taskInfo.getTaskStatus()[0].getRunState() ==
          TaskStatus.State.RUNNING) {
        UtilsForTests.waitFor(1000);
        tempTaskInfo = taskInfo;
        taskInfo = wovenClient.getTaskInfo(taskInfo.getTaskID());
      } else {
        break;
      }

      if (taskInfo == null) {
        taskInfo = tempTaskInfo;
        break;
      }
    }

    verifyProcessTreeOverLimit(taskInfo,jobId);
    JobInfo jInfo = wovenClient.getJobInfo(jobId);
    LOG.info("Waiting till the job is completed...");
    counter = 0;
    while (counter++ < 60) {
      if (jInfo == null) {
        break;
      } else if (jInfo.getStatus().isJobComplete()) {
        break;
      }
      UtilsForTests.waitFor(100);
      jInfo = wovenClient.getJobInfo(jobId);
    }
    UtilsForTests.waitFor(1000);
  }