Package org.apache.hadoop.mapreduce.test.system

Examples of org.apache.hadoop.mapreduce.test.system.JobInfo
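
The snippets below all follow the same idiom: fetch a JobInfo snapshot for a JobID through the Herriot test client (named wovenClient or remoteJTClient in these examples) and re-fetch it in a sleep loop until the job reaches the desired state. A minimal sketch of that idiom, assuming an already submitted job and a test-client proxy as in the examples:

    // Poll the JobInfo snapshot until the job completes. getJobInfo() can
    // return null once the job is retired, so guard each refresh.
    JobInfo jInfo = wovenClient.getJobInfo(id);
    while (jInfo != null && !jInfo.getStatus().isJobComplete()) {
      Thread.sleep(1000);
      jInfo = wovenClient.getJobInfo(id);
    }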


    RunningJob rJob = null;
    JobInfo jInfo = null;

    // Record how many jobs had been submitted before this run.
    jobStatus = client.getAllJobs();
    prevJobsNum = jobStatus.length;

    // Run RandomWriter and expect a zero exit code.
    Assert.assertEquals(0, ToolRunner.run(job, tool, args));

    // Wait for the job to appear in the job status list.
    jobStatus = client.getAllJobs();

    while (jobStatus.length == prevJobsNum) {
      LOG.info("Waiting for the job to appear in the jobStatus");
      Thread.sleep(1000);
      jobStatus = client.getAllJobs();
    }

    // Get the id of the job just submitted; it is always
    // placed in the first slot of the job status list.
    id = jobStatus[0].getJobID();

    rJob = client.getJob(id);

    jInfo = wovenClient.getJobInfo(id);

    //Making sure that the job is complete.
    while (jInfo != null && !jInfo.getStatus().isJobComplete()) {
      Thread.sleep(10000);
      jInfo = wovenClient.getJobInfo(id);
    }

    cluster.getJTClient().verifyCompletedJob(id);
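
The wait loops in this example are unbounded and can hang the test run if the job never shows up. A bounded variant, sketched here as a hypothetical helper (not part of the Herriot harness), fails the test instead:

    // Hypothetical helper: wait until getAllJobs() reports more jobs than
    // before submission, failing after roughly 60 seconds.
    private JobStatus[] waitForNewJob(JobClient client, int prevJobsNum)
        throws Exception {
      JobStatus[] jobStatus = client.getAllJobs();
      for (int attempt = 0; jobStatus.length == prevJobsNum; attempt++) {
        Assert.assertTrue("Job did not appear within 60 seconds", attempt < 60);
        Thread.sleep(1000);
        jobStatus = client.getAllJobs();
      }
      return jobStatus;
    }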


    RunningJob rJob =
        client.getJob(org.apache.hadoop.mapred.JobID.downgrade(slpJob
            .getJobID()));
    JobID id = rJob.getID();

    JobInfo jInfo = wovenClient.getJobInfo(id);
    Assert.assertNotNull("jobInfo is null", jInfo);

    while (jInfo.getStatus().getRunState() != JobStatus.RUNNING) {
      Thread.sleep(1000);
      jInfo = wovenClient.getJobInfo(id);
    }

    LOG.info("Waiting till job starts running one map");
    jInfo = wovenClient.getJobInfo(id);
    Assert.assertEquals(jInfo.runningMaps(), 1);

    LOG.info("waiting for another cycle to "
        + "check if the maps dont finish off");
    Thread.sleep(1000);
    jInfo = wovenClient.getJobInfo(id);
    Assert.assertEquals(jInfo.runningMaps(), 1);

    TaskInfo[] taskInfos = wovenClient.getTaskInfo(id);

    for (TaskInfo info : taskInfos) {
      LOG.info("constructing control action to signal task to finish");
      FinishTaskControlAction action =
          new FinishTaskControlAction(TaskID.downgrade(info.getTaskID()));
      for (TTClient cli : cluster.getTTClients()) {
        cli.getProxy().sendAction(action);
      }
    }

    jInfo = wovenClient.getJobInfo(id);
    int i = 1;
    if (jInfo != null) {
      while (!jInfo.getStatus().isJobComplete()) {
        Thread.sleep(1000);
        jInfo = wovenClient.getJobInfo(id);
        if (jInfo == null) {
          break;
        }
        if (i > 40) {
          Assert.fail("Controlled Job with ID : "
              + jInfo.getID()
              + " has not completed in 40 seconds after signalling.");
        }
        i++;
      }
    }
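
The double loop that broadcasts the FinishTaskControlAction to every tasktracker recurs in several of these tests; only the tracker actually hosting the task acts on it, so sending it everywhere is harmless. A small hypothetical helper (assuming the MRCluster and TTClient types used above) makes the intent explicit:

    // Hypothetical helper: signal all tasks of a job to finish by sending
    // a FinishTaskControlAction for each task to every tasktracker.
    private void signalAllTasks(MRCluster cluster, TaskInfo[] taskInfos)
        throws IOException {
      for (TaskInfo info : taskInfos) {
        FinishTaskControlAction action =
            new FinishTaskControlAction(TaskID.downgrade(info.getTaskID()));
        for (TTClient cli : cluster.getTTClients()) {
          cli.getProxy().sendAction(action);
        }
      }
    }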

      // Counter for the job loop.
      countLoop++;

      TTClient tClient = null;
      JobInfo jInfo = wovenClient.getJobInfo(rJob.getID());
      LOG.info("jInfo is :" + jInfo);

      // Fail fast if jobInfo is null.
      Assert.assertNotNull("jobInfo is null", jInfo);

      // Wait for the job to start running.
      count = 0;
      while (jInfo.getStatus().getRunState() != JobStatus.RUNNING) {
        UtilsForTests.waitFor(10000);
        count++;
        jInfo = wovenClient.getJobInfo(rJob.getID());
        // If the count goes beyond a point, fail the test; this avoids an
        // infinite loop under unforeseen circumstances.
        if (count > 10) {
          Assert.fail("Job has not reached the running state for more "
              + "than 100 seconds. Failing at this point");
        }
      }

      LOG.info("job id is :" + rJob.getID().toString());

      TaskInfo[] taskInfos =
          cluster.getJTClient().getProxy().getTaskInfo(rJob.getID());

      boolean distCacheFileIsFound;

      for (TaskInfo taskInfo : taskInfos) {
        distCacheFileIsFound = false;
        String[] taskTrackers = taskInfo.getTaskTrackers();
        for (String taskTracker : taskTrackers) {
          // Formatting tasktracker to get just its FQDN
          taskTracker = UtilsForTests.getFQDNofTT(taskTracker);
          LOG.info("taskTracker is :" + taskTracker);

          // Reset the taskTrackerFound flag for this tracker.
          taskTrackerFound = false;

          // This will be entered from the second job onwards
          if (countLoop > 1) {
            if (taskTracker != null) {
              continueLoop = taskTrackerCollection.contains(taskTracker);
            }
            if (continueLoop) {
              taskTrackerFound = true;
            }
          }
          // Collect the tasktrackers.
          if (taskTracker != null) {
            taskTrackerCollection.add(taskTracker);
          }

          // We have looped through twice looking for tasks getting
          // scheduled on the same tasktrackers. If the same tasktracker
          // was not hit for subsequent jobs (perhaps because there are
          // many tasktrackers), the testcase has to stop here.
          if (countLoop > 1) {
            continueLoop = false;
          }

          tClient = cluster.getTTClient(taskTracker);

          // tClient may be null because the task is already dead,
          // e.g. for a setup task.
          if (tClient == null) {
            continue;
          }

          String[] localDirs = tClient.getMapredLocalDirs();
          int distributedFileCount = 0;
          // Check every configured local dir.
          for (String localDir : localDirs) {
            // The public distributed cache is always stored under
            // mapred.local.dir/tasktracker/archive.
            localDir =
                localDir
                    + Path.SEPARATOR
                    + TaskTracker.getPublicDistributedCacheDir();
            LOG.info("localDir is : " + localDir);

            // Get file status of all the directories
            // and files under that path.
            FileStatus[] fileStatuses =
                tClient.listStatus(localDir, true, true);
            for (FileStatus fileStatus : fileStatuses) {
              Path path = fileStatus.getPath();
              LOG.info("path is :" + path.toString());
              // Check whether this path ends with the distributed file name.
              boolean matchesDistCacheFile =
                  path.toString().endsWith(distributedFileName);
              // If the file is found, record it and verify its permissions.
              if (matchesDistCacheFile) {
                distCacheFileIsFound = true;
                LOG.info("PATH found is :" + path.toString());
                distributedFileCount++;
                FsPermission fsPerm = fileStatus.getPermission();
                Assert.assertTrue("File Permission is not 777", fsPerm
                    .equals(new FsPermission("777")));
              }
            }
          }

          LOG.debug("The distributed FileCount is :" + distributedFileCount);
          LOG.debug("The taskTrackerFound is :" + taskTrackerFound);

          // If the distributed cache file is modified in DFS between two
          // job runs, it can be present more than once on any of the
          // tasktrackers on which the job ran.
          if (distributedFileCount != 2 && taskTrackerFound) {
            Assert.fail("The distributed cache file count has to be two, "
                + "but found was " + distributedFileCount);
          } else if (distributedFileCount > 1 && !taskTrackerFound) {
            Assert.fail("The distributed cache file cannot be present more "
                + "than once, but found was " + distributedFileCount);
          } else if (distributedFileCount < 1) {
            Assert.fail("The distributed cache file count is less than one, "
                + "but found was " + distributedFileCount);
          }
          Assert.assertTrue(
              "The distributed cache file does not exist",
              distCacheFileIsFound);
        }
      }
      // Allow the job to continue via the finish-task control action.
      for (TaskInfo taskInfoRemaining : taskInfos) {
        FinishTaskControlAction action =
            new FinishTaskControlAction(TaskID.downgrade(taskInfoRemaining
                .getTaskID()));
        Collection<TTClient> tts = cluster.getTTClients();
        for (TTClient cli : tts) {
          cli.getProxy().sendAction(action);
        }
      }

      // Killing the job because all the verification needed
      // for this testcase is completed.
      rJob.killJob();

      // Waiting for 3 seconds for cleanup to start
      Thread.sleep(3000);

      // Getting the last cleanup task's tasktracker also, as
      // distributed cache gets uploaded even during cleanup.
      TaskInfo[] myTaskInfos = wovenClient.getTaskInfo(rJob.getID());
      if (myTaskInfos != null) {
        for (TaskInfo info : myTaskInfos) {
          if (info.isSetupOrCleanup()) {
            String[] taskTrackers = info.getTaskTrackers();
            for (String taskTracker : taskTrackers) {
              // Formatting tasktracker to get just its FQDN
              taskTracker = UtilsForTests.getFQDNofTT(taskTracker);
              LOG.info("taskTracker is :" + taskTracker);
              // Collect the tasktrackers.
              if (taskTracker != null) {
                taskTrackerCollection.add(taskTracker);
              }
            }
          }
        }
      }

      // Making sure that the job is complete.
      while (jInfo != null && !jInfo.getStatus().isJobComplete()) {
        Thread.sleep(10000);
        jInfo = wovenClient.getJobInfo(rJob.getID());
      }

    } while (continueLoop);
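
The permission assertion above compares against the octal string form; an equivalent sketch using FsAction constants (assuming the same FileStatus from the listing loop) may read more explicitly:

    // rwxrwxrwx expressed with FsAction constants instead of "777".
    FsPermission expected =
        new FsPermission(FsAction.ALL, FsAction.ALL, FsAction.ALL);
    Assert.assertEquals("Public distributed cache file must be 777",
        expected, fileStatus.getPermission());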

    jconf.setOutputCommitter(theClass);
    if (!isUserKill) {
      RunningJob rJob = cluster.getJTClient().getClient().submitJob(jconf);
      JobID id = rJob.getID();
      JobInfo jInfo = wovenClient.getJobInfo(id);
      Assert.assertTrue("Job is not in PREP state",
          jInfo.getStatus().getRunState() == JobStatus.PREP);
    } else {
      // The user kills the job.
      RunningJob rJob = cluster.getJTClient().getClient().submitJob(jconf);
      JobInfo info = wovenClient.getJobInfo(rJob.getID());
      Assert.assertNotNull("Job Info is null",info);
      JobID id = rJob.getID();
      while (info.runningMaps() != 1) {
        Thread.sleep(1000);
        info = wovenClient.getJobInfo(id);
      }
      rJob.killJob();
    }

    JobStatus[] jobStatus = client.getAllJobs();
    String userName = jobStatus[0].getUsername();

    TTClient tClient = null;
    JobInfo jInfo = wovenClient.getJobInfo(rJob.getID());
    LOG.info("jInfo is :" + jInfo);

    //Fail fast if jobInfo is null
    Assert.assertNotNull("jobInfo is null", jInfo);

    //Wait for the job to start running.
    count = 0;
    while (jInfo.getStatus().getRunState() != JobStatus.RUNNING) {
      UtilsForTests.waitFor(10000);
      count++;
      jInfo = wovenClient.getJobInfo(rJob.getID());
      //If the count goes beyond a point, fail the test; this avoids an
      //infinite loop under unforeseen circumstances.

    rJob.submit();
    RunningJob rJob1 =
        client.getJob(org.apache.hadoop.mapred.JobID.downgrade(rJob.getJobID()));
    JobID id = rJob.getJobID();

    JobInfo jInfo = wovenClient.getJobInfo(id);
    Assert.assertNotNull("jobInfo is null", jInfo);

    while (jInfo.getStatus().getRunState() != JobStatus.RUNNING) {
      Thread.sleep(1000);
      jInfo = wovenClient.getJobInfo(id);
    }

    LOG.info("Waiting till job starts running one map");

    slpJob.submit();
    RunningJob runJob =
        jobClient.getJob(org.apache.hadoop.mapred.JobID.downgrade(slpJob
            .getJobID()));
    JobID id = runJob.getID();
    JobInfo jInfo = remoteJTClient.getJobInfo(id);
    int counter = 0;
    while (counter < 60) {
      if (jInfo.getStatus().getRunState() == JobStatus.RUNNING) {
        break;
      } else {
        UtilsForTests.waitFor(1000);
        jInfo = remoteJTClient.getJobInfo(id);
      }
      counter++;
    }
    Assert.assertTrue("Job has not been started for 1 min.", counter != 60);

    TaskInfo[] taskInfos = remoteJTClient.getTaskInfo(id);
    for (TaskInfo taskinfo : taskInfos) {
      if (!taskinfo.isSetupOrCleanup()) {
        taskInfo = taskinfo;
      }
    }

    counter = 0;
    taskInfo = remoteJTClient.getTaskInfo(taskInfo.getTaskID());
    while (counter < 60) {
      if (taskInfo.getTaskStatus().length > 0) {
        if (taskInfo.getTaskStatus()[0].getRunState() == TaskStatus.State.RUNNING) {
          break;
        }
      }
      UtilsForTests.waitFor(1000);
      taskInfo = remoteJTClient.getTaskInfo(taskInfo.getTaskID());
      counter++;
    }
    Assert.assertTrue("Task has not been started for 1 min.", counter != 60);

    NetworkedJob networkJob = jobClient.new NetworkedJob(jInfo.getStatus());
    TaskID tID = TaskID.downgrade(taskInfo.getTaskID());
    TaskAttemptID taskAttID = new TaskAttemptID(tID, 0);
    networkJob.killTask(taskAttID, false);

    LOG.info("Waiting till the job is completed...");
    while (!jInfo.getStatus().isJobComplete()) {
      UtilsForTests.waitFor(100);
      jInfo = remoteJTClient.getJobInfo(id);
    }

    Assert.assertEquals(
        "JobStatus", JobStatus.SUCCEEDED, jInfo.getStatus().getRunState());
  }
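
Two details here are easy to miss: NetworkedJob is a non-static inner class of JobClient, hence the jobClient.new syntax, and killTask(attemptId, false) kills only the single attempt without marking it failed, so the framework reschedules it and the job can still succeed. A condensed sketch of the same calls, assuming the taskInfo and jInfo from above:

    // Kill attempt 0 of the chosen task; shouldFail == false means the
    // attempt is killed (and rescheduled), not failed.
    TaskAttemptID attempt0 =
        new TaskAttemptID(TaskID.downgrade(taskInfo.getTaskID()), 0);
    jobClient.new NetworkedJob(jInfo.getStatus()).killTask(attempt0, false);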

    createInput(inputDir, conf);
    FileInputFormat.setInputPaths(jconf, inputDir);
    FileOutputFormat.setOutputPath(jconf, outputDir);
    RunningJob runJob = jobClient.submitJob(jconf);
    JobID id = runJob.getID();
    JobInfo jInfo = remoteJTClient.getJobInfo(id);
    int counter = 0;
    while (counter < 60) {
      if (jInfo.getStatus().getRunState() == JobStatus.RUNNING) {
        break;
      } else {
        UtilsForTests.waitFor(1000);
        jInfo = remoteJTClient.getJobInfo(id);
      }
      counter++;
    }
    Assert.assertTrue("Job has not been started for 1 min.", counter != 60);

    JobStatus[] jobStatus = jobClient.getAllJobs();
    String userName = jobStatus[0].getUsername();
    TaskInfo[] taskInfos = remoteJTClient.getTaskInfo(id);
    for (TaskInfo taskinfo : taskInfos) {
      if (!taskinfo.isSetupOrCleanup()) {
        taskInfo = taskinfo;
        break;
      }
    }

    counter = 0;
    while (counter < 30) {
      if (taskInfo.getTaskStatus().length > 0) {
        if (taskInfo.getTaskStatus()[0].getRunState() == TaskStatus.State.RUNNING) {
          break;
        }
      }
      UtilsForTests.waitFor(1000);
      taskInfo = remoteJTClient.getTaskInfo(taskInfo.getTaskID());
      counter++;
    }
    Assert.assertTrue("Task has not been started for 30 sec.", counter != 30);

    tID = TaskID.downgrade(taskInfo.getTaskID());
    FinishTaskControlAction action = new FinishTaskControlAction(tID);

    String[] taskTrackers = taskInfo.getTaskTrackers();
    counter = 0;
    while (counter < 30) {
      if (taskTrackers.length != 0) {
        break;
      }
      UtilsForTests.waitFor(100);
      taskTrackers = taskInfo.getTaskTrackers();
      counter++;
    }

    Assert.assertTrue("No tasktracker reported for the task within 3 seconds",
        taskTrackers.length > 0);
    String hostName = taskTrackers[0].split("_")[1];
    hostName = hostName.split(":")[0];
    ttClient = cluster.getTTClient(hostName);
    ttClient.getProxy().sendAction(action);
    String[] localDirs = ttClient.getMapredLocalDirs();
    TaskAttemptID taskAttID = new TaskAttemptID(tID, 0);
    for (String localDir : localDirs) {
      localTaskDir =
          localDir
              + "/"
              + TaskTracker.getLocalTaskDir(userName, id.toString(), taskAttID
                  .toString());
      filesStatus = ttClient.listStatus(localTaskDir, true);
      if (filesStatus.length > 0) {
        isTempFolderExists = true;
        NetworkedJob networkJob = jobClient.new NetworkedJob(jInfo.getStatus());
        networkJob.killTask(taskAttID, false);
        break;
      }
    }
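
A natural follow-up, sketched here as a hypothesis about what the full test verifies (and assuming listStatus simply returns an empty array once the directory is removed), is that the tasktracker eventually cleans the attempt's local dir after the kill:

    // Hypothetical check: poll until the killed attempt's local task dir
    // disappears, allowing up to roughly 30 seconds for cleanup.
    int tries = 0;
    while (ttClient.listStatus(localTaskDir, true).length > 0 && tries < 30) {
      UtilsForTests.waitFor(1000);
      tries++;
    }
    Assert.assertEquals("Temp folder was not cleaned up after the kill",
        0, ttClient.listStatus(localTaskDir, true).length);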
