Examples of TTClient


Examples of org.apache.hadoop.mapreduce.test.system.TTClient

    RunningJob rJob = cluster.getJTClient().getClient().submitJob(jconf);

    JobStatus[] jobStatus = client.getAllJobs();
    String userName = jobStatus[0].getUsername();

    TTClient tClient = null;
    JobInfo jInfo = wovenClient.getJobInfo(rJob.getID());
    LOG.info("jInfo is :" + jInfo);

    //Fail the test if jobInfo is null
    Assert.assertNotNull("jobInfo is null", jInfo);

    //Wait for the job to start running.
    count = 0;
    while (jInfo.getStatus().getRunState() != JobStatus.RUNNING) {
      UtilsForTests.waitFor(10000);
      count++;
      jInfo = wovenClient.getJobInfo(rJob.getID());
      //If the wait goes on for too long, fail; this avoids an
      //infinite loop under unforeseen circumstances.
      if (count > 10) {
        Assert.fail("Job has not reached the running state for more than " +
            "100 seconds. Failing at this point.");
      }
    }

    LOG.info("job id is :" + rJob.getID().toString());

    TaskInfo[] taskInfos = cluster.getJTClient().getProxy()
           .getTaskInfo(rJob.getID());

    boolean distCacheFileIsFound;

    for (TaskInfo taskInfo : taskInfos) {
      distCacheFileIsFound = false;
      String[] taskTrackers = taskInfo.getTaskTrackers();

      for(String taskTracker : taskTrackers) {
        //Getting the exact FQDN of the tasktracker from
        //the tasktracker string.
        taskTracker = UtilsForTests.getFQDNofTT(taskTracker);
        tClient = cluster.getTTClient(taskTracker);
        String[] localDirs = tClient.getMapredLocalDirs();
        int distributedFileCount = 0;
        String localDirOnly = null;

        boolean fileNotPresentForThisDirectoryPath = false;

        //Go to every single path
        for (String localDir : localDirs) {
          fileNotPresentForThisDirectoryPath = false;

          //The per-user directory <mapred.local.dir>/taskTracker/<username>
          //is checked below for the proper (700) permissions.
          localDirOnly = localDir + Path.SEPARATOR + TaskTracker.SUBDIR +
              Path.SEPARATOR + userName;

          //Private distributed cache will always be stored under
          //mapred.local.dir/taskTracker/<username>/distcache
          localDir = localDir + Path.SEPARATOR +
                  TaskTracker.getPrivateDistributedCacheDir(userName);

          FileStatus fileStatusMapredLocalDirUserName = null;

          try {
            fileStatusMapredLocalDirUserName = tClient.
                            getFileStatus(localDirOnly, true);
          } catch (Exception e) {
            LOG.info("LocalDirOnly :" + localDirOnly + " not found");
            fileNotPresentForThisDirectoryPath = true;
          }

          //The file will only be stored under one of the mapred.local.dir
          //paths; if this path did not have it, just continue
          if (fileNotPresentForThisDirectoryPath)
            continue;

          Path pathMapredLocalDirUserName =
              fileStatusMapredLocalDirUserName.getPath();
          FsPermission fsPermMapredLocalDirUserName =
              fileStatusMapredLocalDirUserName.getPermission();
          Assert.assertTrue("Directory Permission is not 700",
            fsPermMapredLocalDirUserName.equals(new FsPermission("700")));

          //Get file status of all the directories
          //and files under that path.
          FileStatus[] fileStatuses = tClient.listStatus(localDir,
              true, true);
          for (FileStatus fileStatus : fileStatuses) {
            Path path = fileStatus.getPath();
            LOG.info("path is :" + path.toString());
            //Checking if the received path ends with
View Full Code Here
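
The poll-until-RUNNING loop above recurs in almost every example on this page. A minimal helper sketch of the same pattern, assuming the JTProtocol proxy (wovenClient) and the UtilsForTests.waitFor helper seen in the snippet; the method name waitForJobToRun is ours:

    //Hypothetical helper extracted from the loop above. wovenClient is the
    //JTProtocol proxy; UtilsForTests.waitFor sleeps for the given millis.
    private JobInfo waitForJobToRun(JTProtocol wovenClient, JobID jobId)
        throws IOException {
      JobInfo jInfo = wovenClient.getJobInfo(jobId);
      int count = 0;
      while (jInfo.getStatus().getRunState() != JobStatus.RUNNING) {
        UtilsForTests.waitFor(10000);
        jInfo = wovenClient.getJobInfo(jobId);
        //Give up after ~100 seconds instead of looping forever.
        if (++count > 10) {
          Assert.fail("Job has not reached the running state for more than "
              + "100 seconds.");
        }
      }
      return jInfo;
    }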

Examples of org.apache.hadoop.mapreduce.test.system.TTClient

      RunningJob rJob = cluster.getJTClient().getClient().submitJob(jconf);

      //Counter for the job loop
      countLoop++;

      TTClient tClient = null;
      JobInfo jInfo = wovenClient.getJobInfo(rJob.getID());
      LOG.info("jInfo is :" + jInfo);

      //Fail the test if jobInfo is null
      Assert.assertNotNull("jobInfo is null", jInfo);

      //Wait for the job to start running.
      count = 0;
      while (jInfo.getStatus().getRunState() != JobStatus.RUNNING) {
        UtilsForTests.waitFor(10000);
        count++;
        jInfo = wovenClient.getJobInfo(rJob.getID());
        //If the wait goes on for too long, fail; this avoids an
        //infinite loop under unforeseen circumstances.
        if (count > 10) {
          Assert.fail("Job has not reached the running state for more than " +
            "100 seconds. Failing at this point.");
        }
      }

      LOG.info("job id is :" + rJob.getID().toString());

      TaskInfo[] taskInfos = cluster.getJTClient().getProxy()
             .getTaskInfo(rJob.getID());

      boolean distCacheFileIsFound;
      
      for (TaskInfo taskInfo : taskInfos) {
        distCacheFileIsFound = false;
        String[] taskTrackers = taskInfo.getTaskTrackers();
        for (String taskTracker : taskTrackers) {
          //Formatting tasktracker to get just its FQDN
          taskTracker = UtilsForTests.getFQDNofTT(taskTracker);
          LOG.info("taskTracker is :" + taskTracker);

          //This will be entered from the second job onwards
          if (countLoop > 1) {
            if (taskTracker != null) {
              continueLoop = taskTrackerCollection.contains(taskTracker);
            }
            if (!continueLoop) {
              break;
            }
          }

          //Collecting the tasktrackers
          if (taskTracker != null)
            taskTrackerCollection.add(taskTracker);

          //We have looped through enough times looking for tasks getting
          //submitted on the same tasktrackers. The same tasktracker was not
          //hit for subsequent jobs, maybe because there are many
          //tasktrackers, so the testcase has to stop here.
          if (countLoop > 2) {
            continueLoop = false;
          }

          tClient = cluster.getTTClient(taskTracker);

          //tClient may be null because the task is already dead. Ex: setup
          if (tClient == null) {
            continue;
          }

          String[] localDirs = tClient.getMapredLocalDirs();
          int distributedFileCount = 0;
          //Go to every single path
          for (String localDir : localDirs) {
            //Public distributed cache will always be stored under
            //mapred.local.dir/tasktracker/archive
            localDir = localDir + Path.SEPARATOR +
                   TaskTracker.getPublicDistributedCacheDir();
            LOG.info("localDir is : " + localDir);

            //Get file status of all the directories
            //and files under that path.
            FileStatus[] fileStatuses = tClient.listStatus(localDir,
                true, true);
            for (FileStatus fileStatus : fileStatuses) {
              Path path = fileStatus.getPath();
              LOG.info("path is :" + path.toString());
              //Checking if the received path ends with
View Full Code Here
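
The snippet above is cut off inside the loop over fileStatuses; presumably the missing check compares each returned path against the name of the file placed in the distributed cache. A hedged completion of that inner loop, where distCacheFilename is our placeholder for that file name:

    //Sketch of the truncated check: count occurrences of the cached file
    //under this tracker's cache directory. distCacheFilename is a
    //hypothetical placeholder for the distributed file's name.
    for (FileStatus fileStatus : fileStatuses) {
      Path path = fileStatus.getPath();
      if (path.toString().endsWith(distCacheFilename)) {
        distributedFileCount++;
        distCacheFileIsFound = true;
      }
    }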

Examples of org.apache.hadoop.mapreduce.test.system.TTClient

      RunningJob rJob = cluster.getJTClient().getClient().submitJob(jconf);

      //Counter for the job loop
      countLoop++;

      TTClient tClient = null;
      JobInfo jInfo = wovenClient.getJobInfo(rJob.getID());
      LOG.info("jInfo is :" + jInfo);

      //Fail the test if jobInfo is null
      Assert.assertNotNull("jobInfo is null", jInfo);

      //Wait for the job to start running.
      count = 0;
      while (jInfo.getStatus().getRunState() != JobStatus.RUNNING) {
        UtilsForTests.waitFor(10000);
        count++;
        jInfo = wovenClient.getJobInfo(rJob.getID());
        //If the wait goes on for too long, fail; this avoids an
        //infinite loop under unforeseen circumstances.
        if (count > 10) {
          Assert.fail("Job has not reached the running state for more than " +
            "100 seconds. Failing at this point.");
        }
      }

      LOG.info("job id is :" + rJob.getID().toString());

      TaskInfo[] taskInfos = cluster.getJTClient().getProxy()
             .getTaskInfo(rJob.getID());

      boolean distCacheFileIsFound;

      for (TaskInfo taskInfo : taskInfos) {
        distCacheFileIsFound = false;
        String[] taskTrackers = taskInfo.getTaskTrackers();
        for (String taskTracker : taskTrackers) {
          //Formatting tasktracker to get just its FQDN
          taskTracker = UtilsForTests.getFQDNofTT(taskTracker);
          LOG.info("taskTracker is :" + taskTracker);

          //Reset taskTrackerFound for this tasktracker
          taskTrackerFound = false;

          //This will be entered from the second job onwards
          if (countLoop > 1) {
            if (taskTracker != null) {
              continueLoop = taskTrackerCollection.contains(taskTracker);
            }
            if (continueLoop) {
              taskTrackerFound = true;
            }
          }
          //Collecting the tasktrackers
          if (taskTracker != null)
            taskTrackerCollection.add(taskTracker);

          //We have looped through twice looking for tasks getting submitted
          //on the same tasktrackers. The same tasktracker was not hit for
          //subsequent jobs, maybe because there are many tasktrackers, so
          //the testcase has to stop here.
          if (countLoop > 1) {
            continueLoop = false;
          }

          tClient = cluster.getTTClient(taskTracker);

          //tClient may be null because the task is already dead. Ex: setup
          if (tClient == null) {
            continue;
          }

          String[] localDirs = tClient.getMapredLocalDirs();
          int distributedFileCount = 0;
          //Go to every single path
          for (String localDir : localDirs) {
            //Public distributed cache will always be stored under
            //mapred.local.dir/tasktracker/archive
            localDir = localDir + Path.SEPARATOR +
                   TaskTracker.getPublicDistributedCacheDir();
            LOG.info("localDir is : " + localDir);

            //Get file status of all the directories
            //and files under that path.
            FileStatus[] fileStatuses = tClient.listStatus(localDir,
                true, true);
            for (FileStatus fileStatus : fileStatuses) {
              Path path = fileStatus.getPath();
              LOG.info("path is :" + path.toString());
              //Checking if the received path ends with
View Full Code Here

Examples of org.apache.hadoop.mapreduce.test.system.TTClient

    TaskInfo[] myTaskInfos = wovenClient.getTaskInfo(id);
    boolean isOneTaskStored = false;
    String sometaskpid = null;
    org.apache.hadoop.mapreduce.TaskAttemptID sometaskId = null;
    TTClient myCli = null;
    for (TaskInfo info : myTaskInfos) {
      if (!info.isSetupOrCleanup()) {
        String[] taskTrackers = info.getTaskTrackers();
        for (String taskTracker : taskTrackers) {
          TTInfo ttInfo = wovenClient.getTTInfo(taskTracker);
          TTClient ttCli =  cluster.getTTClient(ttInfo.getStatus().getHost());
          TaskID taskId = info.getTaskID();
          TTTaskInfo ttTaskInfo = ttCli.getProxy().getTask(taskId);
          Assert.assertNotNull(ttTaskInfo);
          Assert.assertNotNull(ttTaskInfo.getConf());
          Assert.assertNotNull(ttTaskInfo.getUser());
          Assert.assertTrue(ttTaskInfo.getTaskStatus().getProgress() >= 0.0);
          Assert.assertTrue(ttTaskInfo.getTaskStatus().getProgress() <= 1.0);
          //Get the pid of the task attempt. The task need not have
          //reported the pid of the task by the time we are checking
          //the pid. So perform null check.
          String pid = ttTaskInfo.getPid();
          int i = 1;
          while (pid == null || pid.isEmpty()) {
            Thread.sleep(1000);
            LOG.info("Waiting for task to report its pid back");
            ttTaskInfo = ttCli.getProxy().getTask(taskId);
            pid = ttTaskInfo.getPid();
            if (i == 40) {
              Assert.fail("The task pid was not reported within 40 seconds.");
            }
            i++;
          }
          if (!isOneTaskStored) {
            sometaskpid = pid;
            sometaskId = ttTaskInfo.getTaskStatus().getTaskID();
            myCli = ttCli;
            isOneTaskStored = true;
          }
          LOG.info("verified task progress to be between 0 and 1");
          State state = ttTaskInfo.getTaskStatus().getRunState();
          if (ttTaskInfo.getTaskStatus().getProgress() < 1.0 &&
              ttTaskInfo.getTaskStatus().getProgress() > 0.0) {
            Assert.assertEquals(TaskStatus.State.RUNNING, state);
            LOG.info("verified run state as " + state);
          }
          FinishTaskControlAction action = new FinishTaskControlAction(
              org.apache.hadoop.mapred.TaskID.downgrade(info.getTaskID()));
          ttCli.getProxy().sendAction(action);
        }
      }
    }
    rJob.killJob();
    int i = 1;
View Full Code Here
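
The FinishTaskControlAction sent at the end of the loop releases a task that the framework has been holding open; in these system tests the job is normally configured up front so that its tasks block until they receive the signal. A hedged sketch of both halves, assuming the configureControlActionForJob helper belongs to the same framework as the snippet:

    //Before submission: ask the framework to make tasks wait for an
    //explicit finish signal (assumed helper from the test framework).
    FinishTaskControlAction.configureControlActionForJob(conf);

    //After inspecting a task: release it on its tasktracker, exactly as
    //the loop above does.
    FinishTaskControlAction action = new FinishTaskControlAction(
        org.apache.hadoop.mapred.TaskID.downgrade(info.getTaskID()));
    ttCli.getProxy().sendAction(action);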

Examples of org.apache.hadoop.mapreduce.test.system.TTClient

  public void testDirCleanupAfterTaskKilled() throws IOException,
          InterruptedException {
    TaskInfo taskInfo = null;
    boolean isTempFolderExists = false;
    String localTaskDir = null;
    TTClient ttClient = null;
    TaskID tID = null;
    FileStatus[] filesStatus = null;
    Path inputDir = new Path("input");
    Path outputDir = new Path("output");
    Configuration conf = new Configuration(cluster.getConf());
    JobConf jconf = new JobConf(conf);
    jconf.setJobName("Word Count");
    jconf.setJarByClass(WordCount.class);
    jconf.setMapperClass(WordCount.MapClass.class);
    jconf.setCombinerClass(WordCount.Reduce.class);
    jconf.setReducerClass(WordCount.Reduce.class);
    jconf.setNumMapTasks(1);
    jconf.setNumReduceTasks(1);
    jconf.setMaxMapAttempts(20);
    jconf.setMaxReduceAttempts(20);
    jconf.setOutputKeyClass(Text.class);
    jconf.setOutputValueClass(IntWritable.class);

    cleanup(inputDir, conf);
    cleanup(outputDir, conf);
    createInput(inputDir, conf);
    FileInputFormat.setInputPaths(jconf, inputDir);
    FileOutputFormat.setOutputPath(jconf, outputDir);
    RunningJob runJob = jobClient.submitJob(jconf);
    JobID id = runJob.getID();
    JobInfo jInfo = remoteJTClient.getJobInfo(id);
    int counter = 0;
    while (counter < 60) {
      if (jInfo.getStatus().getRunState() == JobStatus.RUNNING) {
        break;
      } else {
        UtilsForTests.waitFor(1000);
        jInfo = remoteJTClient.getJobInfo(id);
      }
      counter ++;
    }
    Assert.assertTrue("Job has not been started for 1 min.", counter != 60);

    JobStatus[] jobStatus = jobClient.getAllJobs();
    String userName = jobStatus[0].getUsername();
    TaskInfo[] taskInfos = remoteJTClient.getTaskInfo(id);
    for (TaskInfo taskinfo : taskInfos) {
      if (!taskinfo.isSetupOrCleanup()) {
        taskInfo = taskinfo;
        break;
      }
    }

    counter = 0;
    while (counter < 30) {
      if (taskInfo.getTaskStatus().length > 0) {
        if (taskInfo.getTaskStatus()[0].getRunState()
                == TaskStatus.State.RUNNING) {
          break;
        }
      }
      UtilsForTests.waitFor(1000);
      taskInfo = remoteJTClient.getTaskInfo(taskInfo.getTaskID());
      counter ++;
    }
    Assert.assertTrue("Task has not been started for 30 sec.",
            counter != 30);

    tID = TaskID.downgrade(taskInfo.getTaskID());
    FinishTaskControlAction action = new FinishTaskControlAction(tID);

    String[] taskTrackers = taskInfo.getTaskTrackers();
    counter = 0;
    while (counter < 30) {
      if (taskTrackers.length != 0) {
        break;
      }
      UtilsForTests.waitFor(100);
      taskTrackers = taskInfo.getTaskTrackers();
      counter ++;
    }

    String hostName = taskTrackers[0].split("_")[1];
    hostName = hostName.split(":")[0];
    ttClient = cluster.getTTClient(hostName);
    ttClient.getProxy().sendAction(action);
    String localDirs[] = ttClient.getMapredLocalDirs();
    TaskAttemptID taskAttID = new TaskAttemptID(tID, 0);
    for (String localDir : localDirs) {
      localTaskDir = localDir + "/"
              + TaskTracker.getLocalTaskDir(userName,
                      id.toString(), taskAttID.toString());
      filesStatus = ttClient.listStatus(localTaskDir, true);
      if (filesStatus.length > 0) {
        isTempFolderExists = true;
        NetworkedJob networkJob = jobClient.new NetworkedJob(jInfo.getStatus());
        networkJob.killTask(taskAttID, false);
        break;
      }
    }

    Assert.assertTrue("Task Attempt directory " +
            taskAttID + " has not been found while task was running.",
                    isTempFolderExists);
    taskInfo = remoteJTClient.getTaskInfo(tID);

    counter = 0;
    while (counter < 60) {
      UtilsForTests.waitFor(1000);
      taskInfo = remoteJTClient.getTaskInfo(tID);
      filesStatus = ttClient.listStatus(localTaskDir, true);
      if (filesStatus.length == 0) {
        break;
      }
      counter ++;
    }
View Full Code Here
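
The two chained split calls that derive hostName assume a tracker name of the form tracker_<host>:<rpc-info>, which is our reading of the classic tasktracker naming scheme. A small helper sketch (the name hostOfTracker is ours):

    //Hypothetical helper: extract the host from a tasktracker name such as
    //"tracker_host1.example.com:localhost/127.0.0.1:58297".
    private static String hostOfTracker(String trackerName) {
      String host = trackerName.split("_")[1]; //drop the "tracker" prefix
      return host.split(":")[0];               //drop the port information
    }

With it, the lookup above becomes ttClient = cluster.getTTClient(hostOfTracker(taskTrackers[0])).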

Examples of org.apache.hadoop.mapreduce.test.system.TTClient

   * after failing the task.
   */
  @Test
  public void testDirCleanupAfterTaskFailed() throws IOException,
          InterruptedException {
    TTClient ttClient = null;
    FileStatus[] filesStatus = null;
    String localTaskDir = null;
    TaskInfo taskInfo = null;
    TaskID tID = null;
    boolean isTempFolderExists = false;
    Path inputDir = new Path("input");
    Path outputDir = new Path("output");
    Configuration conf = new Configuration(cluster.getConf());
    JobConf jconf = new JobConf(conf);
    jconf.setJobName("Task Failed job");
    jconf.setJarByClass(UtilsForTests.class);
    jconf.setMapperClass(FailedMapperClass.class);
    jconf.setNumMapTasks(1);
    jconf.setNumReduceTasks(0);
    jconf.setMaxMapAttempts(1);
    cleanup(inputDir, conf);
    cleanup(outputDir, conf);
    createInput(inputDir, conf);
    FileInputFormat.setInputPaths(jconf, inputDir);
    FileOutputFormat.setOutputPath(jconf, outputDir);
    RunningJob runJob = jobClient.submitJob(jconf);
    JobID id = runJob.getID();
    JobInfo jInfo = remoteJTClient.getJobInfo(id);
   
    int counter = 0;
    while (counter < 60) {
      if (jInfo.getStatus().getRunState() == JobStatus.RUNNING) {
        break;
      } else {
        UtilsForTests.waitFor(1000);
        jInfo = remoteJTClient.getJobInfo(id);
      }
      counter ++;
    }
    Assert.assertTrue("Job has not been started for 1 min.", counter != 60);

    JobStatus[] jobStatus = jobClient.getAllJobs();
    String userName = jobStatus[0].getUsername();
    TaskInfo[] taskInfos = remoteJTClient.getTaskInfo(id);
    for (TaskInfo taskinfo : taskInfos) {
      if (!taskinfo.isSetupOrCleanup()) {
        taskInfo = taskinfo;
        break;
      }
    }

    tID = TaskID.downgrade(taskInfo.getTaskID());
    FinishTaskControlAction action = new FinishTaskControlAction(tID);
    String[] taskTrackers = taskInfo.getTaskTrackers();
    counter = 0;
    while (counter < 30) {
      if (taskTrackers.length != 0) {
        break;
      }
      UtilsForTests.waitFor(1000);
      taskInfo = remoteJTClient.getTaskInfo(taskInfo.getTaskID());
      taskTrackers = taskInfo.getTaskTrackers();
      counter ++;
    }
    Assert.assertTrue("Task tracker not found.", taskTrackers.length != 0);
    String hostName = taskTrackers[0].split("_")[1];
    hostName = hostName.split(":")[0];
    ttClient = cluster.getTTClient(hostName);
    ttClient.getProxy().sendAction(action);

    counter = 0;
    while (counter < 60) {
      if (taskInfo.getTaskStatus().length > 0) {
        if (taskInfo.getTaskStatus()[0].getRunState()
                == TaskStatus.State.RUNNING) {
          break;
        }
      }
      UtilsForTests.waitFor(1000);
      taskInfo = remoteJTClient.getTaskInfo(taskInfo.getTaskID());
      counter ++;
    }
    Assert.assertTrue("Task has not been started for 1 min.",
            counter != 60);

    String localDirs[] = ttClient.getMapredLocalDirs();
    TaskAttemptID taskAttID = new TaskAttemptID(tID, 0);
    for (String localDir : localDirs) {
      localTaskDir = localDir + "/"
              + TaskTracker.getLocalTaskDir(userName,
                      id.toString(), taskAttID.toString());
      filesStatus = ttClient.listStatus(localTaskDir, true);
      if (filesStatus.length > 0) {
        isTempFolderExists = true;
        break;
      }
    }

    taskInfo = remoteJTClient.getTaskInfo(taskInfo.getTaskID());
    Assert.assertTrue("Task Attempt directory " +
            taskAttID + " has not been found while task was running.",
                    isTempFolderExists);
    counter = 0;
    while (counter < 30) {
      UtilsForTests.waitFor(1000);
      taskInfo = remoteJTClient.getTaskInfo(tID);
      counter ++;
    }

    Assert.assertEquals("Task status has not been changed to FAILED.",
            TaskStatus.State.FAILED,
            taskInfo.getTaskStatus()[0].getRunState());

    filesStatus = ttClient.listStatus(localTaskDir, true);
    Assert.assertTrue("Temporary folder has not been cleanup.",
            filesStatus.length == 0);
  }
View Full Code Here
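
Both cleanup tests repeat the same bounded wait: poll once a second and give up after N tries. A hedged helper sketch that would cover those loops; the Condition interface and the waitUpTo name are ours:

    //Hypothetical bounded-wait helper: polls once a second, up to
    //maxSeconds, and reports whether the condition ever held.
    interface Condition {
      boolean holds() throws IOException;
    }

    private static boolean waitUpTo(int maxSeconds, Condition condition)
        throws IOException {
      for (int i = 0; i < maxSeconds; i++) {
        if (condition.holds()) {
          return true;
        }
        UtilsForTests.waitFor(1000);
      }
      return false;
    }

Each polling loop above then collapses to an assertTrue over waitUpTo(60, ...) with the run-state check inside an anonymous Condition.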

Examples of org.apache.hadoop.mapreduce.test.system.TTClient

    LOG.info("MAX_MAP_TASK_ATTEMPTS is : " + MAX_MAP_TASK_ATTEMPTS);

    Assert.assertTrue(MAX_MAP_TASK_ATTEMPTS > 0);

    TTClient tClient = null;
    TTClient[] ttClients = null;

    JobInfo jInfo = remoteJTClient.getJobInfo(rJob.getID());

    //Fail the test if jobInfo is null
View Full Code Here

Examples of org.apache.hadoop.mapreduce.test.system.TTClient

    List<TTClient> tClient = cluster.getTTClients();
    int count = tClient.size();
    int moreThan25Per = count / 4 + 1;
    LOG.info("More than 25% of TTClients is " + moreThan25Per);
    for (int i = 0; i < moreThan25Per; ++i) {
      TTClient client = tClient.get(i);
      bListedTT.add(client);
      blackListTT(client);
    }
    //Now run the high RAM job
    JobClient jobClient = cluster.getJTClient().getClient();
View Full Code Here
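
A note on the arithmetic above: with integer division, count / 4 + 1 trackers is always strictly more than a quarter of count, whatever the cluster size. A one-line sanity check:

    //For any positive count, 4 * (count / 4 + 1) > count, i.e. the chosen
    //number of trackers is strictly more than 25% of the total.
    int moreThan25Per = count / 4 + 1;
    assert 4 * moreThan25Per > count;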

Examples of org.apache.hadoop.mapreduce.test.system.TTClient

   * @throws Exception in case of test errors
   */
  @Test
  public void testInduceError() throws Exception {
    LOG.info("running testInduceError");
    TTClient client = cluster.getTTClient();
    Configuration tConf = client.getProxy().getDaemonConf();
    tConf.set("mapred.task.tracker.report.address",
        cluster.getConf().get("mapred.task.tracker.report.address"));
    String defaultHealthScript = tConf.get("mapred.healthChecker.script.path");
    Assert.assertTrue("Health script was not set", defaultHealthScript != null);       
    tConf.set("mapred.healthChecker.script.path", remotePath+File.separator+
        healthScriptError);
    tConf.setInt("mapred.healthChecker.interval", 1000);
    helper.copyFileToRemoteHost(healthScriptError, client.getHostName(),
        remotePath, cluster);
    cluster.restartDaemonWithNewConfig(client, "mapred-site.xml", tConf,
        Role.TT);
    //make sure the TT is blacklisted
    helper.verifyTTBlackList(tConf, client,
        "ERROR Task Tracker status is fatal", cluster);
    //Now put back the task tracker in a healthy state
    cluster.restart(client, Role.TT);
    //Fetch the restored daemon conf and clean up the error health script
    tConf = client.getProxy().getDaemonConf();
    helper.deleteFileOnRemoteHost(remotePath+File.separator+healthScriptError,
        client.getHostName());
  }
View Full Code Here

Examples of org.apache.hadoop.mapreduce.test.system.TTClient

  public void testDirCleanupAfterTaskKilled()
      throws IOException, InterruptedException {
    TaskInfo taskInfo = null;
    boolean isTempFolderExists = false;
    String localTaskDir = null;
    TTClient ttClient = null;
    TaskID tID = null;
    FileStatus[] filesStatus = null;
    Path inputDir = new Path("input");
    Path outputDir = new Path("output");
    Configuration conf = new Configuration(cluster.getConf());
    JobConf jconf = new JobConf(conf);
    jconf.setJobName("Word Count");
    jconf.setJarByClass(WordCount.class);
    jconf.setMapperClass(WordCount.MapClass.class);
    jconf.setCombinerClass(WordCount.Reduce.class);
    jconf.setReducerClass(WordCount.Reduce.class);
    jconf.setNumMapTasks(1);
    jconf.setNumReduceTasks(1);
    jconf.setMaxMapAttempts(20);
    jconf.setMaxReduceAttempts(20);
    jconf.setOutputKeyClass(Text.class);
    jconf.setOutputValueClass(IntWritable.class);

    cleanup(inputDir, conf);
    cleanup(outputDir, conf);
    createInput(inputDir, conf);
    FileInputFormat.setInputPaths(jconf, inputDir);
    FileOutputFormat.setOutputPath(jconf, outputDir);
    RunningJob runJob = jobClient.submitJob(jconf);
    JobID id = runJob.getID();
    JobInfo jInfo = remoteJTClient.getJobInfo(id);
    int counter = 0;
    while (counter < 60) {
      if (jInfo.getStatus().getRunState() == JobStatus.RUNNING) {
        break;
      } else {
        UtilsForTests.waitFor(1000);
        jInfo = remoteJTClient.getJobInfo(id);
      }
      counter++;
    }
    Assert.assertTrue("Job has not been started for 1 min.", counter != 60);

    JobStatus[] jobStatus = jobClient.getAllJobs();
    String userName = jobStatus[0].getUsername();
    TaskInfo[] taskInfos = remoteJTClient.getTaskInfo(id);
    for (TaskInfo taskinfo : taskInfos) {
      if (!taskinfo.isSetupOrCleanup()) {
        taskInfo = taskinfo;
        break;
      }
    }

    counter = 0;
    while (counter < 30) {
      if (taskInfo.getTaskStatus().length > 0) {
        if (taskInfo.getTaskStatus()[0].getRunState() == TaskStatus.State.RUNNING) {
          break;
        }
      }
      UtilsForTests.waitFor(1000);
      taskInfo = remoteJTClient.getTaskInfo(taskInfo.getTaskID());
      counter++;
    }
    Assert.assertTrue("Task has not been started for 30 sec.", counter != 30);

    tID = TaskID.downgrade(taskInfo.getTaskID());
    FinishTaskControlAction action = new FinishTaskControlAction(tID);

    String[] taskTrackers = taskInfo.getTaskTrackers();
    counter = 0;
    while (counter < 30) {
      if (taskTrackers.length != 0) {
        break;
      }
      UtilsForTests.waitFor(100);
      taskTrackers = taskInfo.getTaskTrackers();
      counter++;
    }

    String hostName = taskTrackers[0].split("_")[1];
    hostName = hostName.split(":")[0];
    ttClient = cluster.getTTClient(hostName);
    ttClient.getProxy().sendAction(action);
    String localDirs[] = ttClient.getMapredLocalDirs();
    TaskAttemptID taskAttID = new TaskAttemptID(tID, 0);
    for (String localDir : localDirs) {
      localTaskDir = localDir + "/"
          + TaskTracker.getLocalTaskDir(userName, id.toString(),
              taskAttID.toString());
      filesStatus = ttClient.listStatus(localTaskDir, true);
      if (filesStatus.length > 0) {
        isTempFolderExists = true;
        NetworkedJob networkJob =
            new JobClient.NetworkedJob(jInfo.getStatus(), jobClient.cluster);
        networkJob.killTask(taskAttID, false);
        break;
      }
    }

    Assert.assertTrue(
        "Task Attempt directory "
            + taskAttID + " has not been found while task was running.",
        isTempFolderExists);
    taskInfo = remoteJTClient.getTaskInfo(tID);

    counter = 0;
    while (counter < 60) {
      UtilsForTests.waitFor(1000);
      taskInfo = remoteJTClient.getTaskInfo(tID);
      filesStatus = ttClient.listStatus(localTaskDir, true);
      if (filesStatus.length == 0) {
        break;
      }
      counter++;
    }
View Full Code Here