Package org.apache.hadoop.mapreduce

Examples of org.apache.hadoop.mapreduce.SleepJob$EmptySplit
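SleepJob (with its nested EmptySplit input split) is a synthetic MapReduce job whose map and reduce tasks simply sleep for configured intervals, which makes it a convenient workload for the framework tests below. Every snippet drives it through SleepJob.createJob with six arguments. As a minimal sketch, assuming the usual org.apache.hadoop imports and a reachable cluster (the argument values and the parameter labels in the comment are illustrative):

    // Minimal sketch of driving SleepJob directly; values are illustrative.
    Configuration conf = new Configuration();
    SleepJob sleepJob = new SleepJob();
    sleepJob.setConf(conf);
    // createJob(numMapper, numReducer, mapSleepTime, mapSleepCount,
    //           reduceSleepTime, reduceSleepCount)
    Job job = sleepJob.createJob(1, 1, 1000, 1, 1000, 1);
    job.waitForCompletion(true); // block until the job completes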


    JobConf conf = new JobConf(miniMRCluster.createJobConf());
    conf.setMemoryForMapTask(PER_TASK_LIMIT);
    conf.setMemoryForReduceTask(PER_TASK_LIMIT);

    JobClient jClient = new JobClient(conf);
    SleepJob sleepJob = new SleepJob();
    sleepJob.setConf(conf);
    // Start the job
    Job job = sleepJob.createJob(1, 1, 5000, 1, 1000, 1);
    job.submit();
    boolean TTOverFlowMsgPresent = false;
    while (true) {
      List<TaskReport> allTaskReports = new ArrayList<TaskReport>();
      // Completion of the truncated call below is assumed; downgrade() follows
      // the convention used by the other snippets on this page.
      allTaskReports.addAll(Arrays.asList(jClient.getMapTaskReports(
          org.apache.hadoop.mapred.JobID.downgrade(job.getJobID()))));
      // ...


    JobConf conf = new JobConf(miniMRCluster.createJobConf());
    // Set per-task physical memory limits to a higher value
    conf.setLong(MRJobConfig.MAP_MEMORY_PHYSICAL_MB, 2 * 1024L);
    conf.setLong(MRJobConfig.REDUCE_MEMORY_PHYSICAL_MB, 2 * 1024L);
    JobClient jClient = new JobClient(conf);
    SleepJob sleepJob = new SleepJob();
    sleepJob.setConf(conf);
    // Start the job
    Job job = sleepJob.createJob(1, 1, 100000, 1, 100000, 1);
    job.submit();
    boolean TTOverFlowMsgPresent = false;
    while (true) {
      List<TaskReport> allTaskReports = new ArrayList<TaskReport>();
      // Assumed completion of the truncated call, as in the previous snippet.
      allTaskReports.addAll(Arrays.asList(jClient.getMapTaskReports(
          org.apache.hadoop.mapred.JobID.downgrade(job.getJobID()))));
      // ...
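The two snippets above set task memory limits by different routes: the first through the JobConf convenience setters, the second by writing the physical-memory keys from MRJobConfig directly. A combined sketch, with illustrative values only:

    // Sketch only: the MB values below are illustrative, not from the tests.
    JobConf conf = new JobConf();
    conf.setMemoryForMapTask(512L);      // per-map limit, in MB
    conf.setMemoryForReduceTask(512L);   // per-reduce limit, in MB
    conf.setLong(MRJobConfig.MAP_MEMORY_PHYSICAL_MB, 1024L);
    conf.setLong(MRJobConfig.REDUCE_MEMORY_PHYSICAL_MB, 1024L);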

  // The listing opens mid-declaration; this signature is a hypothetical
  // reconstruction (method name and parameter are illustrative only).
  private void runSleepJobExpectingRemoteException(JobConf jobConf) throws
      IOException {
    String[] args = { "-m", "0", "-r", "0", "-mt", "0", "-rt", "0" };
    boolean throwsException = false;
    String msg = null;
    try {
      ToolRunner.run(jobConf, new SleepJob(), args);
    } catch (RemoteException re) {
      throwsException = true;
      msg = re.unwrapRemoteException().getMessage();
    }
    assertTrue(throwsException);
    // ... (truncated; msg is captured above, presumably for a later assertion)
  }
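SleepJob is also a standard Tool, so the same flags used above (-m, -r, -mt, -rt) can be passed through ToolRunner with non-zero values to actually run tasks. A hedged sketch, with illustrative arguments:

    // Sketch: run SleepJob via ToolRunner; argument values are illustrative.
    // -m/-r set the mapper/reducer counts, -mt/-rt the sleep times in msec.
    Configuration conf = new Configuration();
    String[] args = { "-m", "2", "-r", "1", "-mt", "500", "-rt", "500" };
    int exitCode = ToolRunner.run(conf, new SleepJob(), args);
    System.exit(exitCode);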

    // Guard counter that bounds a polling loop which might otherwise
    // never terminate.
    int count = 0;

    SleepJob job = new SleepJob();
    job.setConf(conf);
    Job slpJob = job.createJob(5, 1, 1000, 1000, 100, 100);

    DistributedCache.createSymlink(conf);
    URI uri = URI.create(uriPath);
    DistributedCache.addCacheFile(uri, conf);
    JobConf jconf = new JobConf(conf);
    // ...
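The DistributedCache calls above follow the standard pattern: enable symlinks, register the file URI on the configuration, then wrap it in a JobConf for the old-API client. A self-contained sketch with a hypothetical cache path:

    // Sketch of the DistributedCache setup above; the URI is hypothetical.
    Configuration conf = new Configuration();
    DistributedCache.createSymlink(conf);        // allow #fragment symlinks
    URI uri = URI.create("hdfs:///tmp/cachefile.txt#cachefile");
    DistributedCache.addCacheFile(uri, conf);    // localize the file for tasks
    JobConf jconf = new JobConf(conf);           // old-API view for submission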

  @Test
  public void testControlledJob() throws Exception {
    Configuration conf = new Configuration(cluster.getConf());
    JTProtocol wovenClient = cluster.getJTClient().getProxy();
    FinishTaskControlAction.configureControlActionForJob(conf);
    SleepJob job = new SleepJob();
    job.setConf(conf);

    Job slpJob = job.createJob(1, 0, 100, 100, 100, 100);
    slpJob.submit();
    JobClient client = cluster.getJTClient().getClient();

    RunningJob rJob =
        client.getJob(org.apache.hadoop.mapred.JobID.downgrade(slpJob
            .getJobID())); // assumed completion of the truncated call
    // ...
  }

    int taskTrackerCounter = 0;
    // This will store all the tasktrackers in which tasks ran
    ArrayList<String> taskTrackerCollection = new ArrayList<String>();

    do {
      SleepJob job = new SleepJob();
      job.setConf(conf);
      Job slpJob = job.createJob(5, 1, 1000, 1000, 100, 100);

      DistributedCache.createSymlink(conf);
      URI uri = URI.create(uriPath);
      DistributedCache.addCacheFile(uri, conf);
      JobConf jconf = new JobConf(conf);
      // ...

    ArrayList<String> taskTrackerCollection = new ArrayList<String>();
    // This boolean tells whether two tasks ran on the same tasktracker.
    boolean taskTrackerFound = false;

    do {
      SleepJob job = new SleepJob();
      job.setConf(conf);
      Job slpJob = job.createJob(5, 1, 1000, 1000, 100, 100);

      // Before starting, Modify the file
      String input = "This will be the content of\n" + "distributed cache\n";
      // Create the cache file at the path; the original listing truncates
      // inside this declaration:
      // DataOutputStream file = ...


  @Test
  public void testJobSubmission() throws Exception {
    Configuration conf = new Configuration(cluster.getConf());
    SleepJob job = new SleepJob();
    job.setConf(conf);
    Job rJob = job.createJob(1, 1, 100, 100, 100, 100);
    rJob = cluster.getJTClient().submitAndVerifyJob(rJob);
    cluster.getJTClient().verifyJobHistory(rJob.getJobID());
  }
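The same submission can be checked without the Herriot harness (cluster.getJTClient() and friends) by blocking on the plain MapReduce API. A minimal sketch, assuming only a default Configuration:

    // Sketch: submit a SleepJob and block until completion, no test harness.
    Configuration conf = new Configuration();
    SleepJob job = new SleepJob();
    job.setConf(conf);
    Job rJob = job.createJob(1, 1, 100, 100, 100, 100);
    boolean succeeded = rJob.waitForCompletion(true); // 'true' logs progress
    assertTrue(succeeded);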

  @Test
  public void testTaskDetails() throws Exception {
    Configuration conf = new Configuration(cluster.getConf());
    JTProtocol wovenClient = cluster.getJTClient().getProxy();
    FinishTaskControlAction.configureControlActionForJob(conf);
    SleepJob job = new SleepJob();
    job.setConf(conf);

    Job rJob = job.createJob(1, 1, 100, 100, 100, 100);
    JobClient client = cluster.getJTClient().getClient();
    rJob.submit();
    RunningJob rJob1 =
        client.getJob(org.apache.hadoop.mapred.JobID.downgrade(rJob.getJobID()));
    JobID id = rJob.getJobID();
    // ...
  }

  @Test
  public void testFailedTaskJobStatus()
      throws IOException, InterruptedException, ClassNotFoundException {
    Configuration conf = new Configuration(cluster.getConf());
    TaskInfo taskInfo = null;
    SleepJob job = new SleepJob();
    job.setConf(conf);
    Job slpJob = job.createJob(3, 1, 4000, 4000, 100, 100);
    JobConf jobConf = new JobConf(conf);
    jobConf.setMaxMapAttempts(20);
    jobConf.setMaxReduceAttempts(20);
    slpJob.submit();
    // The original listing truncates inside this declaration:
    // RunningJob runJob = ...
  }
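From here the test presumably polls the old-API handle for job status; a hedged sketch of that pattern, mirroring the handle-acquisition style of the earlier snippets:

    // Sketch: poll the old-API handle for job status. 'conf' and 'slpJob'
    // stand in for the configuration and submitted Job from the test above.
    JobClient client = new JobClient(new JobConf(conf));
    RunningJob runJob = client.getJob(
        org.apache.hadoop.mapred.JobID.downgrade(slpJob.getJobID()));
    while (!runJob.isComplete()) {
      Thread.sleep(1000); // poll once per second
    }
    System.out.println("job successful? " + runJob.isSuccessful());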
