Package org.apache.hadoop.corona

Examples of org.apache.hadoop.corona.MiniCoronaCluster$Builder
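Before the full test, here is a minimal sketch of the MiniCoronaCluster.Builder pattern the example relies on: bring up a MiniDFSCluster, hand its namenode URI to the Builder, and shut both clusters down in a finally block. The method name startMiniCoronaCluster is hypothetical; every API call and the cluster sizes (4 DFS nodes, 10 task trackers, 1 local dir) are taken from the testWithDFS example below.

  // Minimal sketch of the Builder usage exercised in the test below.
  // Cluster sizes are illustrative and copied from that test.
  public void startMiniCoronaCluster() throws IOException {
    MiniDFSCluster dfs = null;
    MiniCoronaCluster mr = null;
    try {
      Configuration conf = new Configuration();
      dfs = new MiniDFSCluster(conf, 4, true, null);      // 4-node mini HDFS
      String namenode = dfs.getFileSystem().getUri().toString();
      mr = new MiniCoronaCluster.Builder()
          .numTaskTrackers(10)                            // Corona task trackers
          .namenode(namenode)                             // point at the mini HDFS
          .numDir(1)
          .build();
      JobConf jobConf = mr.createJobConf();               // conf wired to the mini cluster
      // ... submit and monitor jobs against jobConf here ...
    } finally {
      if (mr != null) { mr.shutdown(); }
      if (dfs != null) { dfs.shutdown(); }
    }
  }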


    // Tail of a preceding helper in the full test class; the snippet begins mid-file.
    validateAttempt(tip, attemptId, ts, cleanupNeeded, false);
  }

  public void testWithDFS() throws IOException {
    MiniDFSCluster dfs = null;
    MiniCoronaCluster mr = null;
    CoronaJobTracker jt = null;
    FileSystem fileSys = null;
    try {
      Configuration conf = new Configuration();
      dfs = new MiniDFSCluster(conf, 4, true, null);
      fileSys = dfs.getFileSystem();
      String namenode = fileSys.getUri().toString();
      mr = new MiniCoronaCluster.Builder().
          numTaskTrackers(10).namenode(namenode).numDir(1).build();
      final Path inDir = new Path("./input");
      final Path outDir = new Path("./output");
      String input = "The quick brown fox\nhas many silly\nred fox sox\n";
      {
        LOG.info("launch job with fail tasks");
        // launch job with fail tasks
        JobConf jobConf = mr.createJobConf();
        jobConf.setOutputCommitter(CommitterWithLogs.class);
        JobClient jc = prepareJob(jobConf, inDir, outDir, input);
        RunningJob rJob = jc.submitJob(jobConf);
        jc.monitorAndPrintJob(jobConf, rJob);
        jt = (CoronaJobTracker) jc.jobSubmitClient;
        validateJob(rJob, jt, true);
        fileSys.delete(outDir, true);
      }
      {
        LOG.info("launch job with fail tasks and fail-cleanups with exit(-1)");
        // launch job with fail tasks and fail-cleanups with exit(-1)
        JobConf jobConf = mr.createJobConf();
        jobConf.setOutputCommitter(CommitterWithFailTaskCleanup.class);
        JobClient jc = prepareJob(jobConf, inDir, outDir, input);
        RunningJob rJob = jc.submitJob(jobConf);
        jt = (CoronaJobTracker) jc.jobSubmitClient;
        rJob.waitForCompletion();
        validateJob(rJob, jt, true);
        fileSys.delete(outDir, true);
      }
      {
        LOG.info("launch job with fail tasks and fail-cleanups with IOE");
        // launch job with fail tasks and fail-cleanups with IOE
        JobConf jobConf = mr.createJobConf();
        jobConf.setOutputCommitter(CommitterWithFailTaskCleanup2.class);
        JobClient jc = prepareJob(jobConf, inDir, outDir, input);
        RunningJob rJob = jc.submitJob(jobConf);
        jt = (CoronaJobTracker) jc.jobSubmitClient;
        rJob.waitForCompletion();
        validateJob(rJob, jt, true);
        fileSys.delete(outDir, true);
      }

      {
        LOG.info("launch job with fail tasks and turn off task-cleanup task");
        // launch job with fail tasks and turn off task-cleanup task
        JobConf jobConf = mr.createJobConf();
        jobConf.setOutputCommitter(CommitterWithLogs.class);
        jobConf.setTaskCleanupNeeded(false);
        JobClient jc = prepareJob(jobConf, inDir, outDir, input);
        RunningJob rJob = jc.submitJob(jobConf);
        jt = (CoronaJobTracker) jc.jobSubmitClient;
        rJob.waitForCompletion();
        validateJob(rJob, jt, false);
        fileSys.delete(outDir, true);
      }
      {
        LOG.info("launch job with all attempts failing");
        JobConf jobConf = mr.createJobConf();
        jobConf.setMaxMapAttempts(3);
        JobClient jc = prepareJob(jobConf, inDir, outDir, input);
        RunningJob rJob = jc.submitJob(jobConf);
        jt = (CoronaJobTracker) jc.jobSubmitClient;
        rJob.waitForCompletion();
        assertTrue(rJob.isComplete() && !rJob.isSuccessful());
        fileSys.delete(outDir, true);
      }
    } catch (InterruptedException e) {
      e.printStackTrace();
    } finally {
      if (dfs != null) { dfs.shutdown(); }
      if (mr != null) { mr.shutdown(); }
    }
  }
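Note the teardown pattern: whatever the individual jobs do, the finally block shuts down the MiniDFSCluster and the MiniCoronaCluster so later tests start from a clean slate. The first four job variants differ only in their OutputCommitter and in whether task cleanup is enabled (the cleanupNeeded flag passed to validateJob); the last variant lowers setMaxMapAttempts so that the job itself is expected to complete without success.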

