Examples of JobInfo



Examples of org.apache.hadoop.mapreduce.test.system.JobInfo

      //counter for job Loop
      countLoop++;

      TTClient tClient = null;
      JobInfo jInfo = wovenClient.getJobInfo(rJob.getID());
      LOG.info("jInfo is :" + jInfo);

      //Assert that jobInfo is not null
      Assert.assertNotNull("jobInfo is null", jInfo);

      //Wait for the job to start running.
      count = 0;
      while (jInfo.getStatus().getRunState() != JobStatus.RUNNING) {
        UtilsForTests.waitFor(10000);
        count++;
        jInfo = wovenClient.getJobInfo(rJob.getID());
        //If the count goes beyond a point, fail the test; this avoids an
        //infinite loop under unforeseen circumstances. The testcase would
        //fail later anyway.
        if (count > 10) {
          Assert.fail("Job has not reached the running state for more than " +
            "100 seconds. Failing at this point");
        }
      }

      LOG.info("job id is :" + rJob.getID().toString());

      TaskInfo[] taskInfos = cluster.getJTClient().getProxy()
             .getTaskInfo(rJob.getID());

      boolean distCacheFileIsFound;

      for (TaskInfo taskInfo : taskInfos) {
        distCacheFileIsFound = false;
        String[] taskTrackers = taskInfo.getTaskTrackers();
        for (String taskTracker : taskTrackers) {
          //Formatting tasktracker to get just its FQDN
          taskTracker = UtilsForTests.getFQDNofTT(taskTracker);
          LOG.info("taskTracker is :" + taskTracker);

          //Reset the taskTrackerFound flag for this tasktracker
          taskTrackerFound = false;

          //This will be entered from the second job onwards
          if (countLoop > 1) {
            if (taskTracker != null) {
              continueLoop = taskTrackerCollection.contains(taskTracker);
            }
            if (continueLoop) {
              taskTrackerFound = true;
            }
          }
          //Collecting the tasktrackers
          if (taskTracker != null) {
            taskTrackerCollection.add(taskTracker);
          }

          //We have looped through twice looking for tasks getting
          //submitted on the same tasktrackers. If the same tasktracker
          //was not hit for subsequent jobs, perhaps because there are
          //many tasktrackers, the testcase has to stop here.
          if (countLoop > 1) {
            continueLoop = false;
          }

          tClient = cluster.getTTClient(taskTracker);

          //tClient may be null because the task is already dead. Ex: setup
          if (tClient == null) {
            continue;
          }

          String[] localDirs = tClient.getMapredLocalDirs();
          int distributedFileCount = 0;
          //Go to every single path
          for (String localDir : localDirs) {
            //Public distributed cache will always be stored under
            //mapred.local.dir/tasktracker/archive
            localDir = localDir + Path.SEPARATOR +
                   TaskTracker.getPublicDistributedCacheDir();
            LOG.info("localDir is : " + localDir);

            //Get file status of all the directories
            //and files under that path.
            FileStatus[] fileStatuses = tClient.listStatus(localDir,
                true, true);
            for (FileStatus  fileStatus : fileStatuses) {
              Path path = fileStatus.getPath();
              LOG.info("path is :" + path.toString());
              //Checking if the received path ends with
              //the distributed filename
              distCacheFileIsFound = (path.toString()).
                  endsWith(distributedFileName);
              //If the file is found, count it and check its permission.
              if (distCacheFileIsFound){
                LOG.info("PATH found is :" + path.toString());
                distributedFileCount++;
                String filename = path.getName();
                FsPermission fsPerm = fileStatus.getPermission();
                Assert.assertTrue("File Permission is not 777",
                    fsPerm.equals(new FsPermission("777")));
              }
            }
          }

          LOG.debug("The distributed FileCount is :" + distributedFileCount);
          LOG.debug("The taskTrackerFound is :" + taskTrackerFound);

          // If the distributed cache is modified in DFS between two job
          // runs, it can be present more than once on any of the
          // tasktrackers on which the job ran.
          if (distributedFileCount != 2 && taskTrackerFound) {
            Assert.fail("The distributed cache file count has to be two. " +
                "But found was " + distributedFileCount);
          } else if (distributedFileCount > 1 && !taskTrackerFound) {
            Assert.fail("The distributed cache file cannot be present more " +
                "than once. But found was " + distributedFileCount);
          } else if (distributedFileCount < 1) {
            Assert.fail("The distributed cache file count is less than one. " +
                "But found was " + distributedFileCount);
          }
          if (!distCacheFileIsFound) {
            Assert.fail("The distributed cache file does not exist");
          }
        }
      }
      //Allow the job to continue through MR control job.
      for (TaskInfo taskInfoRemaining : taskInfos) {
        FinishTaskControlAction action = new FinishTaskControlAction(TaskID
           .downgrade(taskInfoRemaining.getTaskID()));
        Collection<TTClient> tts = cluster.getTTClients();
        for (TTClient cli : tts) {
          cli.getProxy().sendAction(action);
        }
      }

      //Killing the job because all the verification needed
      //for this testcase is completed.
      rJob.killJob();

      //Waiting for 3 seconds for cleanup to start
      Thread.sleep(3000);

      //Getting the last cleanup task's tasktracker also, as
      //distributed cache gets uploaded even during cleanup.
      TaskInfo[] myTaskInfos = wovenClient.getTaskInfo(rJob.getID());
      if (myTaskInfos != null) {
        for(TaskInfo info : myTaskInfos) {
          if(info.isSetupOrCleanup()) {
            String[] taskTrackers = info.getTaskTrackers();
            for(String taskTracker : taskTrackers) {
              //Formatting tasktracker to get just its FQDN
              taskTracker = UtilsForTests.getFQDNofTT(taskTracker);
              LOG.info("taskTracker is :" + taskTracker);
              //Collecting the tasktrackers
              if (taskTracker != null) {
                taskTrackerCollection.add(taskTracker);
              }
            }
          }
        }
      }

      //Making sure that the job is complete.
      while (jInfo != null && !jInfo.getStatus().isJobComplete()) {
        Thread.sleep(10000);
        jInfo = wovenClient.getJobInfo(rJob.getID());
      }

    } while (continueLoop);
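A recurring idiom in the example above is polling getJobInfo until the job reaches RUNNING. Below is a minimal sketch of that loop as a reusable helper; the JTProtocol parameter type (the Herriot proxy behind wovenClient) and the name waitForJobRunning are assumptions for illustration:

    //Hypothetical helper (sketch): poll JobInfo until the job reaches
    //RUNNING or the bounded wait expires, mirroring the loop above.
    private JobInfo waitForJobRunning(JTProtocol wovenClient, JobID id,
        int maxPolls, long pollMillis) throws Exception {
      JobInfo jInfo = wovenClient.getJobInfo(id);
      int count = 0;
      while (jInfo.getStatus().getRunState() != JobStatus.RUNNING) {
        if (++count > maxPolls) {
          Assert.fail("Job has not reached the running state in time");
        }
        UtilsForTests.waitFor(pollMillis);
        jInfo = wovenClient.getJobInfo(id);
      }
      return jInfo;
    }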

Examples of org.apache.hadoop.mapreduce.test.system.JobInfo

    jconf.setOutputCommitter(theClass);
    if (!isUserKill) {
      RunningJob rJob = cluster.getJTClient().getClient().submitJob(jconf);
      JobID id = rJob.getID();
      JobInfo jInfo = wovenClient.getJobInfo(id);
      Assert.assertTrue("Job is not in PREP state",
          jInfo.getStatus().getRunState() == JobStatus.PREP);
    } else {
      //user kill job
      RunningJob rJob = cluster.getJTClient().getClient().submitJob(jconf);
      JobInfo info = wovenClient.getJobInfo(rJob.getID());
      Assert.assertNotNull("Job Info is null",info);
      JobID id = rJob.getID();
      while (info.runningMaps() != 1) {
        Thread.sleep(1000);
        info = wovenClient.getJobInfo(id);
      }
      rJob.killJob();
    }
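A natural follow-up after rJob.killJob() is to confirm the terminal state with the same polling pattern; a hedged sketch of lines that would sit inside the user-kill branch, assuming KILLED is the expected end state:

      //Sketch (assumption): would follow rJob.killJob() inside the
      //user-kill branch above. Confirms the job reaches a terminal state,
      //reusing the isJobComplete() check from other examples on this page.
      info = wovenClient.getJobInfo(id);
      while (info != null && !info.getStatus().isJobComplete()) {
        Thread.sleep(1000);
        info = wovenClient.getJobInfo(id);
      }
      Assert.assertNotNull("Job Info is null", info);
      Assert.assertEquals("Job was not recorded as killed",
          JobStatus.KILLED, info.getStatus().getRunState());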

Examples of org.apache.hadoop.mapreduce.test.system.JobInfo

    JobClient client = cluster.getJTClient().getClient();

    RunningJob rJob = client.submitJob(new JobConf(conf));
    JobID id = rJob.getID();

    JobInfo jInfo = wovenClient.getJobInfo(id);

    while (jInfo.getStatus().getRunState() != JobStatus.RUNNING) {
      Thread.sleep(1000);
      jInfo = wovenClient.getJobInfo(id);
    }

    LOG.info("Waiting till job starts running one map");

Examples of org.apache.hadoop.mapreduce.test.system.JobInfo

    JobConf jobConf = new JobConf(conf);
    jobConf.setMaxMapAttempts(20);
    jobConf.setMaxReduceAttempts(20);
    RunningJob runJob = jobClient.submitJob(jobConf);
    JobID id = runJob.getID();
    JobInfo jInfo = remoteJTClient.getJobInfo(id);
    int counter = 0;
    while (counter < 60) {
      if (jInfo.getStatus().getRunState() == JobStatus.RUNNING) {
        break;
      } else {
        UtilsForTests.waitFor(1000);
        jInfo = remoteJTClient.getJobInfo(id);
      }
      counter++;
    }
    Assert.assertTrue("Job has not started within 1 minute.", counter != 60);

    TaskInfo[] taskInfos = remoteJTClient.getTaskInfo(id);
    for (TaskInfo taskinfo : taskInfos) {
      if (!taskinfo.isSetupOrCleanup()) {
        taskInfo = taskinfo;
      }
    }

    counter = 0;
    taskInfo = remoteJTClient.getTaskInfo(taskInfo.getTaskID());
    while (counter < 60) {
      if (taskInfo.getTaskStatus().length > 0) {
        if (taskInfo.getTaskStatus()[0].getRunState()
                == TaskStatus.State.RUNNING) {
          break;
        }
      }
      UtilsForTests.waitFor(1000);
      taskInfo = remoteJTClient.getTaskInfo(taskInfo.getTaskID());
      counter++;
    }
    Assert.assertTrue("Task has not been started for 1 min.", counter != 60);

    NetworkedJob networkJob = jobClient.new NetworkedJob(jInfo.getStatus());
    TaskID tID = TaskID.downgrade(taskInfo.getTaskID());
    TaskAttemptID taskAttID = new TaskAttemptID(tID , 0);
    networkJob.killTask(taskAttID, false);

    LOG.info("Waiting till the job is completed...");
    while (!jInfo.getStatus().isJobComplete()) {
      UtilsForTests.waitFor(100);
      jInfo = remoteJTClient.getJobInfo(id);
    }

    Assert.assertEquals("JobStatus", jInfo.getStatus().getRunState(),
            JobStatus.SUCCEEDED);
  }
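The two bounded counter loops above (one for the job, one for the task) share a shape worth factoring out. A hedged sketch of the task-side wait as its own helper; the name waitForTaskRunning and the JTProtocol parameter type are assumptions:

    //Hypothetical helper (sketch): poll a task until it reports RUNNING,
    //giving up after maxPolls one-second waits, as in the loops above.
    private TaskInfo waitForTaskRunning(JTProtocol proxy, TaskInfo taskInfo,
        int maxPolls) throws Exception {
      for (int i = 0; i < maxPolls; i++) {
        TaskStatus[] statuses = taskInfo.getTaskStatus();
        if (statuses.length > 0
            && statuses[0].getRunState() == TaskStatus.State.RUNNING) {
          return taskInfo;
        }
        UtilsForTests.waitFor(1000);
        taskInfo = proxy.getTaskInfo(taskInfo.getTaskID());
      }
      Assert.fail("Task has not started within the bounded wait");
      return null; //unreachable: Assert.fail throws
    }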

Examples of org.apache.hadoop.mapreduce.test.system.JobInfo

    createInput(inputDir, conf);
    FileInputFormat.setInputPaths(jconf, inputDir);
    FileOutputFormat.setOutputPath(jconf, outputDir);
    RunningJob runJob = jobClient.submitJob(jconf);
    JobID id = runJob.getID();
    JobInfo jInfo = remoteJTClient.getJobInfo(id);
    int counter = 0;
    while (counter < 60) {
      if (jInfo.getStatus().getRunState() == JobStatus.RUNNING) {
        break;
      } else {
        UtilsForTests.waitFor(1000);
        jInfo = remoteJTClient.getJobInfo(id);
      }
      counter++;
    }
    Assert.assertTrue("Job has not started within 1 minute.", counter != 60);

    JobStatus[] jobStatus = jobClient.getAllJobs();
    String userName = jobStatus[0].getUsername();
    TaskInfo[] taskInfos = remoteJTClient.getTaskInfo(id);
    for (TaskInfo taskinfo : taskInfos) {
      if (!taskinfo.isSetupOrCleanup()) {
        taskInfo = taskinfo;
        break;
      }
    }

    counter = 0;
    while (counter < 30) {
      if (taskInfo.getTaskStatus().length > 0) {
        if (taskInfo.getTaskStatus()[0].getRunState()
                == TaskStatus.State.RUNNING) {
          break;
        }
      }
      UtilsForTests.waitFor(1000);
      taskInfo = remoteJTClient.getTaskInfo(taskInfo.getTaskID());
      counter++;
    }
    Assert.assertTrue("Task has not started within 30 seconds.",
            counter != 30);

    tID = TaskID.downgrade(taskInfo.getTaskID());
    FinishTaskControlAction action = new FinishTaskControlAction(tID);

    String[] taskTrackers = taskInfo.getTaskTrackers();
    counter = 0;
    while (counter < 30) {
      if (taskTrackers.length != 0) {
        break;
      }
      UtilsForTests.waitFor(100);
      taskTrackers = taskInfo.getTaskTrackers();
      counter++;
    }

    String hostName = taskTrackers[0].split("_")[1];
    hostName = hostName.split(":")[0];
    ttClient = cluster.getTTClient(hostName);
    ttClient.getProxy().sendAction(action);
    String localDirs[] = ttClient.getMapredLocalDirs();
    TaskAttemptID taskAttID = new TaskAttemptID(tID, 0);
    for (String localDir : localDirs) {
      localTaskDir = localDir + "/"
              + TaskTracker.getLocalTaskDir(userName,
                      id.toString(), taskAttID.toString());
      filesStatus = ttClient.listStatus(localTaskDir, true);
      if (filesStatus.length > 0) {
        isTempFolderExists = true;
        NetworkedJob networkJob = jobClient.new NetworkedJob(jInfo.getStatus());
        networkJob.killTask(taskAttID, false);
        break;
      }
    }
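The hostname extraction above (splitting a "tracker_<host>:<port>" name) recurs wherever a TTClient is looked up from a tracker name; a minimal sketch isolating it, with the hypothetical name trackerToHostname:

    //Hypothetical helper (sketch): reduce a tracker name of the form
    //"tracker_<host>:<port>" to the bare hostname, exactly as above.
    private static String trackerToHostname(String trackerName) {
      String hostName = trackerName.split("_")[1];
      return hostName.split(":")[0];
    }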

Examples of org.apache.hadoop.mapreduce.test.system.JobInfo

    createInput(inputDir, conf);
    FileInputFormat.setInputPaths(jconf, inputDir);
    FileOutputFormat.setOutputPath(jconf, outputDir);
    RunningJob runJob = jobClient.submitJob(jconf);
    JobID id = runJob.getID();
    JobInfo jInfo = remoteJTClient.getJobInfo(id);
   
    int counter = 0;
    while (counter < 60) {
      if (jInfo.getStatus().getRunState() == JobStatus.RUNNING) {
        break;
      } else {
        UtilsForTests.waitFor(1000);
        jInfo = remoteJTClient.getJobInfo(id);
      }

Examples of org.apache.hadoop.mapreduce.test.system.JobInfo

    Assert.assertTrue(MAX_MAP_TASK_ATTEMPTS > 0);

    TTClient tClient = null;
    TTClient[] ttClients = null;

    JobInfo jInfo = remoteJTClient.getJobInfo(rJob.getID());

    //Assert that the job run state is not null
    Assert.assertNotNull(jInfo.getStatus().getRunState());

    //Wait for the job to start running.
    while (jInfo.getStatus().getRunState() != JobStatus.RUNNING) {
      try {
        Thread.sleep(10000);
      } catch (InterruptedException e) {}
      jInfo = remoteJTClient.getJobInfo(rJob.getID());
    }

    //Temporarily store the jobid to use it later for comparison.
    JobID jobidStore = rJob.getID();
    jobidStore = JobID.downgrade(jobidStore);
    LOG.info("job id is :" + jobidStore.toString());

    TaskInfo[] taskInfos = null;

    //After making sure that the job is running,
    //the test execution has to make sure that
    //at least one task has started running before continuing.
    boolean runningCount = false;
    int count = 0;
    do {
      taskInfos = cluster.getJTClient().getProxy()
        .getTaskInfo(rJob.getID());
      runningCount = false;
      for (TaskInfo taskInfo : taskInfos) {
        TaskStatus[] taskStatuses = taskInfo.getTaskStatus();
        if (taskStatuses.length > 0){
          LOG.info("taskStatuses[0].getRunState() is :" +
            taskStatuses[0].getRunState());
          if (taskStatuses[0].getRunState() == TaskStatus.State.RUNNING){
            runningCount = true;
            break;
          } else {
            LOG.info("Sleeping 5 seconds");
            Thread.sleep(5000);
          }
        }
      }
      count++;
      //If the count goes beyond a point, fail the test; this avoids an
      //infinite loop under unforeseen circumstances. The testcase would
      //fail later anyway.
      if (count > 10) {
        Assert.fail("Since the sleep count has reached beyond a point, " +
          "failing at this point");
      }
    } while (!runningCount);

    //This whole block is about getting the task attempt id of one task
    //and killing it MAX_MAP_TASK_ATTEMPTS times, whenever it
    //re-attempts to run.
    String taskIdKilled = null;
    for (int i = 0; i < MAX_MAP_TASK_ATTEMPTS; i++) {
      taskInfos = cluster.getJTClient().getProxy()
          .getTaskInfo(rJob.getID());

      for (TaskInfo taskInfo : taskInfos) {
        TaskAttemptID taskAttemptID;
        if (!taskInfo.isSetupOrCleanup()) {
          //This is the task that is going to be killed continuously in
          //all its task attempts. The first such task is picked up.
          TaskID taskid = TaskID.downgrade(taskInfo.getTaskID());
          LOG.info("taskid is :" + taskid);
          if (i==0) {
            taskIdKilled = taskid.toString();
            taskAttemptID = new TaskAttemptID(taskid, i);
            LOG.info("taskAttemptid going to be killed is : " + taskAttemptID);
            (jobClient.new NetworkedJob(jInfo.getStatus())).
                killTask(taskAttemptID,true);
            checkTaskCompletionEvent(taskAttemptID, jInfo);
            break;
          } else {
            if (taskIdKilled.equals(taskid.toString())) {
              taskAttemptID = new TaskAttemptID(taskid, i);
              LOG.info("taskAttemptid going to be killed is : " +
                  taskAttemptID);
              (jobClient.new NetworkedJob(jInfo.getStatus())).
                  killTask(taskAttemptID,true);
              checkTaskCompletionEvent(taskAttemptID,jInfo);
              break;
            }
          }
        }
      }
    }
    //Making sure that the job is complete.
    while (jInfo != null && !jInfo.getStatus().isJobComplete()) {
      Thread.sleep(10000);
      jInfo = remoteJTClient.getJobInfo(rJob.getID());
    }

    //Making sure that the correct job status is obtained from all the jobs
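The snippet is cut off right after this comment; a hedged sketch of a plausible completion, checking the saved jobidStore against getAllJobs() as the other examples on this page do (the expected FAILED state is an assumption about this test's intent):

    //Sketch (assumption): after the repeated kills with shouldFail=true,
    //the job should fail once MAX_MAP_TASK_ATTEMPTS is exhausted.
    for (JobStatus status : jobClient.getAllJobs()) {
      if (status.getJobID().toString().equals(jobidStore.toString())) {
        Assert.assertEquals("Job did not fail after exhausting map attempts",
            JobStatus.FAILED, status.getRunState());
      }
    }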

Examples of org.apache.hadoop.mapreduce.v2.app.webapp.dao.JobInfo

      // getAllJobs only gives you a partial job; we want the full one
      Job fullJob = appCtx.getJob(job.getID());
      if (fullJob == null) {
        continue;
      }
      allJobs.add(new JobInfo(fullJob, hasAccess(fullJob, hsr)));
    }
    return allJobs;
  }
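The same dao backs single-job lookups; a hedged sketch of that pattern, reusing the appCtx and hasAccess helpers visible above (the jobId variable and the NotFoundException response are illustrative, not the actual web service signature):

    //Illustrative sketch: fetch one job, reject missing ones, and wrap
    //the full job in the dao, as the loop above does per job.
    Job fullJob = appCtx.getJob(jobId);
    if (fullJob == null) {
      throw new NotFoundException("job, " + jobId + ", is not found");
    }
    JobInfo jobInfo = new JobInfo(fullJob, hasAccess(fullJob, hsr));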

Examples of org.apache.hadoop.mapreduce.v2.hs.webapp.dao.JobInfo

      html.
        p()._("Sorry, ", jid, " not found.")._();
      return;
    }
    List<AMInfo> amInfos = j.getAMInfos();
    JobInfo job = new JobInfo(j);
    ResponseInfo infoBlock = info("Job Overview").
        _("Job Name:", job.getName()).
        _("User Name:", job.getUserName()).
        _("Queue:", job.getQueueName()).
        _("State:", job.getState()).
        _("Uberized:", job.isUber()).
        _("Submitted:", new Date(job.getSubmitTime())).
        _("Started:", new Date(job.getStartTime())).
        _("Finished:", new Date(job.getFinishTime())).
        _("Elapsed:", StringUtils.formatTime(
            Times.elapsed(job.getStartTime(), job.getFinishTime(), false)));
   
    String amString =
        amInfos.size() == 1 ? "ApplicationMaster" : "ApplicationMasters";
   
    // todo - switch to use JobInfo
    List<String> diagnostics = j.getDiagnostics();
    if(diagnostics != null && !diagnostics.isEmpty()) {
      StringBuffer b = new StringBuffer();
      for(String diag: diagnostics) {
        b.append(diag);
      }
      infoBlock._("Diagnostics:", b.toString());
    }

    if(job.getNumMaps() > 0) {
      infoBlock._("Average Map Time", StringUtils.formatTime(job.getAvgMapTime()));
    }
    if(job.getNumReduces() > 0) {
      infoBlock._("Average Shuffle Time", StringUtils.formatTime(job.getAvgShuffleTime()));
      infoBlock._("Average Merge Time", StringUtils.formatTime(job.getAvgMergeTime()));
      infoBlock._("Average Reduce Time", StringUtils.formatTime(job.getAvgReduceTime()));
    }

    for (ConfEntryInfo entry : job.getAcls()) {
      infoBlock._("ACL "+entry.getName()+":", entry.getValue());
    }
    DIV<Hamlet> div = html.
      _(InfoBlock.class).
      div(_INFO_WRAP);
   
      // MRAppMasters Table
        TABLE<DIV<Hamlet>> table = div.table("#job");
        table.
          tr().
            th(amString).
          _().
          tr().
            th(_TH, "Attempt Number").
            th(_TH, "Start Time").
            th(_TH, "Node").
            th(_TH, "Logs").
            _();
        boolean odd = false;
          for (AMInfo amInfo : amInfos) {
            AMAttemptInfo attempt = new AMAttemptInfo(amInfo,
                job.getId(), job.getUserName(), "", "");
            table.tr((odd = !odd) ? _ODD : _EVEN).
              td(String.valueOf(attempt.getAttemptId())).
              td(new Date(attempt.getStartTime()).toString()).
              td().a(".nodelink", url(MRWebAppUtil.getYARNWebappScheme(),
                  attempt.getNodeHttpAddress()),
                  attempt.getNodeHttpAddress())._().
              td().a(".logslink", url(attempt.getShortLogsLink()),
                      "logs")._().
            _();
          }
          table._();
          div._();
         
       
        html.div(_INFO_WRAP).       
     
      // Tasks table
        table("#job").
          tr().
            th(_TH, "Task Type").
            th(_TH, "Total").
            th(_TH, "Complete")._().
          tr(_ODD).
            th().
              a(url("tasks", jid, "m"), "Map")._().
            td(String.valueOf(job.getMapsTotal())).
            td(String.valueOf(job.getMapsCompleted()))._().
          tr(_EVEN).
            th().
              a(url("tasks", jid, "r"), "Reduce")._().
            td(String.valueOf(job.getReducesTotal())).
            td(String.valueOf(job.getReducesCompleted()))._()
          ._().

        // Attempts table
        table("#job").
        tr().
          th(_TH, "Attempt Type").
          th(_TH, "Failed").
          th(_TH, "Killed").
          th(_TH, "Successful")._().
        tr(_ODD).
          th("Maps").
          td().a(url("attempts", jid, "m",
              TaskAttemptStateUI.FAILED.toString()),
              String.valueOf(job.getFailedMapAttempts()))._().
          td().a(url("attempts", jid, "m",
              TaskAttemptStateUI.KILLED.toString()),
              String.valueOf(job.getKilledMapAttempts()))._().
          td().a(url("attempts", jid, "m",
              TaskAttemptStateUI.SUCCESSFUL.toString()),
              String.valueOf(job.getSuccessfulMapAttempts()))._().
        _().
        tr(_EVEN).
          th("Reduces").
          td().a(url("attempts", jid, "r",
              TaskAttemptStateUI.FAILED.toString()),
              String.valueOf(job.getFailedReduceAttempts()))._().
          td().a(url("attempts", jid, "r",
              TaskAttemptStateUI.KILLED.toString()),
              String.valueOf(job.getKilledReduceAttempts()))._().
          td().a(url("attempts", jid, "r",
              TaskAttemptStateUI.SUCCESSFUL.toString()),
              String.valueOf(job.getSuccessfulReduceAttempts()))._().
         _().
       _().
     _();
  }
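Two helpers carry the "Elapsed:" row in the overview block above: Times.elapsed computes the duration and StringUtils.formatTime renders it. A minimal standalone sketch of the same computation; 'job' stands for the JobInfo dao built inside the block above:

    //Illustrative sketch: this is how the "Elapsed:" row is computed.
    String elapsed = StringUtils.formatTime(
        Times.elapsed(job.getStartTime(), job.getFinishTime(), false));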