Package org.apache.hadoop.examples

Examples of org.apache.hadoop.examples.SleepJob$SleepInputFormat
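SleepJob (together with its inner SleepInputFormat, which fabricates the dummy input splits) is the Hadoop example job whose map and reduce tasks simply sleep, which makes it a convenient synthetic workload for the cluster, scheduler, and distributed-cache tests excerpted below. As a self-contained starting point, a minimal launch through ToolRunner might look like this sketch (the class name and argument values are illustrative):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.examples.SleepJob;
import org.apache.hadoop.util.ToolRunner;

public class SleepJobLauncher {
  public static void main(String[] args) throws Exception {
    // One map and one reduce task, each sleeping for 1000 ms.
    String[] jobArgs = { "-m", "1",        // number of map tasks
                         "-r", "1",        // number of reduce tasks
                         "-mt", "1000",    // per-map sleep time, ms
                         "-rt", "1000" };  // per-reduce sleep time, ms
    int exitStatus = ToolRunner.run(new Configuration(), new SleepJob(), jobArgs);
    System.exit(exitStatus);
  }
}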


                         "-m", "1",
                         "-r", "1",
                         "-mt", "1000",
                         "-rt", "1000",
                         "-recordt","100"};
      SleepJob job = new SleepJob();
      JobConf jobConf = new JobConf(conf);
      int exitStatus = ToolRunner.run(jobConf, job, jobArgs);
      Assert.assertEquals("Exit Code:", 0, exitStatus);
      UtilsForTests.waitFor(100);
      JobClient jobClient = jtClient.getClient();


    LOG.info("jobTrackerUserName is :" + jobTrackerUserName);

    int count = 0;

    SleepJob job = new SleepJob();
    job.setConf(conf);
    int totalMapTasks = 0;
    int totalReduceTasks = 0;
    conf = job.setupJobConf(totalMapTasks, totalReduceTasks,
        100, 100, 100, 100);
    JobConf jconf = new JobConf(conf);

    count = 0;
    //The last hour and last day are given 60 seconds and 120 seconds
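Each of these tests drives the same six-argument setupJobConf overload. In the 0.20-era SleepJob the parameters are, in order, numMapper, numReducer, mapSleepTime (ms), mapSleepCount, reduceSleepTime (ms), and reduceSleepCount; treat this ordering as an assumption if you are on a different version:

    // Assumed signature, from the 0.20-era org.apache.hadoop.examples.SleepJob:
    // JobConf setupJobConf(int numMapper, int numReducer,
    //                      long mapSleepTime, int mapSleepCount,
    //                      long reduceSleepTime, int reduceSleepCount)
    JobConf jconf = new JobConf(job.setupJobConf(5, 1, 100, 100, 100, 100));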

    LOG.info("jobTrackerUserName is :" + jobTrackerUserName);

    int count = 0;

    SleepJob job = new SleepJob();
    job.setConf(conf);
    int totalMapTasks = 5;
    int totalReduceTasks = 1;
    conf = job.setupJobConf(totalMapTasks, totalReduceTasks,
        100, 100, 100, 100);
    JobConf jconf = new JobConf(conf);

    //The last hour and last day are given 60 seconds and 120 seconds
    //recreate values rate, replacing one hour and 1 day. Waiting for

  /**
   * Verifies that files added to the distributed cache are localized on the
   * task trackers running the job's tasks.
   * @throws Exception
   */
  @Test
  public void testCacheFilesLocalization() throws Exception {
    conf = wovenClient.getDaemonConf();
    SleepJob job = new SleepJob();
    job.setConf(conf);
    JobConf jobConf = job.setupJobConf(4, 1, 4000, 4000, 1000, 1000);
    DistributedCache.createSymlink(jobConf);
    DistributedCache.addCacheFile(cacheFileURI1, jobConf);
    DistributedCache.addCacheFile(cacheFileURI2, jobConf);
    RunningJob runJob = jobClient.submitJob(jobConf);
    JobID jobId = runJob.getID();
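These cache tests rely on DistributedCache symlinks: once createSymlink is enabled, a cache URI may carry a #linkname fragment, and the localized copy is then exposed under that name in the task's working directory. A minimal sketch of building such a URI follows; the HDFS path and link name are made-up placeholders, not the cacheFileURI1/cacheFileURI2 values constructed elsewhere in this test:

import java.net.URI;
import org.apache.hadoop.filecache.DistributedCache;
import org.apache.hadoop.mapred.JobConf;

public class CacheFileSetup {
  // Registers an HDFS file in the job's distributed cache under a symlink.
  // The HDFS path and link name below are illustrative placeholders.
  static void addSymlinkedCacheFile(JobConf jobConf) throws Exception {
    URI cacheFileURI = new URI("hdfs://localhost:8020/tmp/cachefile1#cachefile1");
    DistributedCache.createSymlink(jobConf);        // honor the #linkname fragment
    DistributedCache.addCacheFile(cacheFileURI, jobConf);
    // A task can then read the localized copy as ./cachefile1
  }
}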

  /**
   * Verifies job behavior when a cache file is deleted from DFS after it has
   * been localized on the task trackers.
   * @throws Exception
   */
  @Test
  public void testDeleteCacheFileInDFSAfterLocalized() throws Exception {
    conf = wovenClient.getDaemonConf();
    SleepJob job = new SleepJob();
    job.setConf(conf);
    JobConf jobConf = job.setupJobConf(4, 1, 4000, 4000, 1000, 1000);
    cacheFileURI3 = createCacheFile(tmpFolderPath, cacheFile3);
    DistributedCache.createSymlink(jobConf);
    DistributedCache.addCacheFile(cacheFileURI3, jobConf);
    RunningJob runJob = jobClient.submitJob(jobConf);
    JobID jobId = runJob.getID();

  /**
   * Verifies cache behavior when the configured local cache size
   * (local.cache.size) is exceeded.
   * @throws Exception
   */
  @Test
  public void testCacheSizeExceeds() throws Exception {
    conf = wovenClient.getDaemonConf();
    SleepJob job = new SleepJob();
    String[] jobArgs = {"-D", "local.cache.size=1024",
                        "-m", "4",
                        "-r", "2",
                        "-mt", "2000",
                        "-rt", "2000",

  }
 
  private void runSleepJob(JobConf conf) throws Exception {
    String[] args = { "-m", "1", "-r", "1",
                      "-mt", "10", "-rt", "10" };
    ToolRunner.run(conf, new SleepJob(), args);
  }

    startCluster(1, clusterProps, schedulerProps);
    JobConf conf = getJobConf();
    conf.setSpeculativeExecution(false);
    conf.set("mapred.committer.job.setup.cleanup.needed", "false");
    conf.setNumTasksToExecutePerJvm(-1);
    SleepJob sleepJob = new SleepJob();
    sleepJob.setConf(conf);
    JobConf job = sleepJob.setupJobConf(3, 3, 1, 1, 1, 1);
    RunningJob rjob;
    try {
      rjob = runJob(job, false);
      fail("The job should have thrown Exception");
    } catch (Exception e) {

    JobConf conf = getJobConf();
    conf.setSpeculativeExecution(false);
    conf.set("mapred.committer.job.setup.cleanup.needed", "false");
    conf.setNumTasksToExecutePerJvm(-1);
    conf.setQueueName(queues[0]);
    SleepJob sleepJob1 = new SleepJob();
    sleepJob1.setConf(conf);
    JobConf sleepJobConf = sleepJob1.setupJobConf(1, 1, 1, 1, 1, 1);
    jobs[0] = runJob(sleepJobConf, true);

    JobConf conf2 = getJobConf();
    conf2.setSpeculativeExecution(false);
    conf2.set("mapred.committer.job.setup.cleanup.needed", "false");
    conf2.setNumTasksToExecutePerJvm(-1);
    conf2.setQueueName(queues[1]);
    SleepJob sleepJob2 = new SleepJob();
    sleepJob2.setConf(conf2);
    JobConf sleep2 = sleepJob2.setupJobConf(3, 3, 5, 3, 5, 3);
    jobs[1] = runJob(sleep2, false);
    assertTrue("Sleep job submitted to queue 1 is not successful", jobs[0]
        .isSuccessful());
    assertTrue("Sleep job submitted to queue 2 is not successful", jobs[1]
        .isSuccessful());

    private String launchSleepJob() throws Exception {
        JobConf jobConf = Services.get().get(HadoopAccessorService.class)
                .createJobConf(new URI(getNameNodeUri()).getAuthority());
        JobClient jobClient = createJobClient();

        SleepJob sleepjob = new SleepJob();
        sleepjob.setConf(jobConf);
        jobConf = sleepjob.setupJobConf(1, 1, 1000, 1, 1000, 1);

        final RunningJob runningJob = jobClient.submitJob(jobConf);
        return runningJob.getID().toString();
    }
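The ID string returned by launchSleepJob can later be resolved back into a RunningJob handle for polling. A minimal sketch, under the assumption that a JobClient is available (the class and helper names here are hypothetical):

import org.apache.hadoop.mapred.JobClient;
import org.apache.hadoop.mapred.JobID;
import org.apache.hadoop.mapred.RunningJob;

public class JobPoller {
  // Resolves a job ID string back to a RunningJob and blocks until it finishes.
  static boolean waitForJob(JobClient jobClient, String jobIdStr) throws Exception {
    RunningJob runningJob = jobClient.getJob(JobID.forName(jobIdStr));
    runningJob.waitForCompletion();   // blocks until the job reaches a terminal state
    return runningJob.isSuccessful();
  }
}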
