Package org.apache.hadoop.mapreduce

Examples of org.apache.hadoop.mapreduce.Job.addFileToClassPath()
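
The excerpts below come from several different projects. For orientation, here is a minimal, self-contained sketch of the typical call pattern; the driver class name, the extra jar path, and the input/output arguments are hypothetical and not taken from any of the excerpts. addFileToClassPath() ships the given file through the distributed cache and adds it to the classpath of every map and reduce task.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.mapreduce.Job;
    import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
    import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;

    // Hypothetical driver class; paths are placeholders.
    public class AddFileToClassPathExample {
      public static void main(String[] args) throws Exception {
        Job job = Job.getInstance(new Configuration(), "classpath-example");
        job.setJarByClass(AddFileToClassPathExample.class);

        // Ship an extra jar (already present in HDFS) with the job and put it
        // on the classpath of every map and reduce task.
        job.addFileToClassPath(new Path("/libs/extra-dependency.jar"));

        FileInputFormat.setInputPaths(job, new Path(args[0]));
        FileOutputFormat.setOutputPath(job, new Path(args[1]));

        System.exit(job.waitForCompletion(true) ? 0 : 1);
      }
    }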


    try {
      try {
        Location programJarCopy = copyProgramJar();
        try {
          job.setJar(jobJar.toURI().toString());
          job.addFileToClassPath(new Path(programJarCopy.toURI()));

          MapReduceContextConfig contextConfig = new MapReduceContextConfig(job);
          // Start a long-running transaction to be used by the MapReduce job tasks.
          Transaction tx = txClient.startLong();
          try {
View Full Code Here


    job.setOutputFormatClass(NullOutputFormat.class);
    FileInputFormat.setInputPaths(job, first);
    // Configure the distributed cache for the job
    job.addCacheFile(
      new URI(first.toUri().toString() + "#distributed.first.symlink"));
    job.addFileToClassPath(second);
    job.addArchiveToClassPath(third);
    job.addCacheArchive(fourth.toUri());
    job.setMaxMapAttempts(1); // speed up failures

    job.submit();
View Full Code Here
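
In the excerpt above, the four cache-related calls behave differently: addCacheFile() ships a plain file, and the "#distributed.first.symlink" fragment makes it appear under that symlink name in each task's working directory; addFileToClassPath() ships a file and also adds it to the task classpath; addArchiveToClassPath() does the same for an archive, which is unpacked on the node; addCacheArchive() ships an archive without adding it to the classpath. As a sketch of how a task sees the symlinked file (the mapper class here is hypothetical, not part of the excerpted tests):

    import java.io.File;
    import java.io.IOException;
    import org.apache.hadoop.io.LongWritable;
    import org.apache.hadoop.io.Text;
    import org.apache.hadoop.mapreduce.Mapper;

    // Hypothetical mapper: the URI fragment after '#' becomes a symlink in the
    // task's working directory, so the cached file can be opened by that name.
    public class SymlinkAwareMapper extends Mapper<LongWritable, Text, LongWritable, Text> {
      @Override
      protected void setup(Context context) throws IOException {
        File linked = new File("distributed.first.symlink");
        if (!linked.exists()) {
          throw new IOException("distributed cache symlink not found");
        }
      }
    }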

    job.setNumReduceTasks(0);
   
    FileOutputFormat.setOutputPath(job,
        new Path(mrCluster.getTestWorkDir().getAbsolutePath(),
        "failmapper-output"));
    job.addFileToClassPath(APP_JAR); // The AppMaster jar itself.
    job.submit();
    String trackingUrl = job.getTrackingURL();
    String jobId = job.getJobID().toString();
    boolean succeeded = job.waitForCompletion(true);
    Assert.assertFalse(succeeded);
View Full Code Here

        SleepJob sleepJob = new SleepJob();
        sleepJob.setConf(mrCluster.getConfig());
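        // createJob(numMapper, numReducer, mapSleepTime, mapSleepCount, reduceSleepTime, reduceSleepCount)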
        Job job = sleepJob.createJob(3, 0, 10000, 1, 0, 0);
        // Job with reduces:
        // Job job = sleepJob.createJob(3, 2, 10000, 1, 10000, 1);
        job.addFileToClassPath(APP_JAR); // The AppMaster jar itself.
        job.submit();
        String trackingUrl = job.getTrackingURL();
        String jobId = job.getJobID().toString();
        job.waitForCompletion(true);
        Assert.assertEquals(JobStatus.State.SUCCEEDED, job.getJobState());
View Full Code Here

    // Set the job jar to a new "dummy" jar so we can check that it's extracted
    // properly
    job.setJar(makeJobJarWithLib(TEST_ROOT_DIR.toUri().toString()));
    // Because the job jar is a "dummy" jar, we need to add the jar containing
    // DistributedCacheChecker to the classpath, or the tasks won't find it
    job.addFileToClassPath(new Path(
            JarFinder.getJar(DistributedCacheChecker.class)));
   
    job.setMapperClass(DistributedCacheChecker.class);
    job.setOutputFormatClass(NullOutputFormat.class);
View Full Code Here

    FileInputFormat.setInputPaths(job, first);
    // Configure the distributed cache for the job
    job.addCacheFile(
        new URI(first.toUri().toString() + "#distributed.first.symlink"));
    job.addFileToClassPath(second);
    job.addFileToClassPath(APP_JAR); // The AppMaster jar itself.
    job.addArchiveToClassPath(third);
    job.addCacheArchive(fourth.toUri());
    job.setMaxMapAttempts(1); // speed up failures
View Full Code Here

    FileInputFormat.setInputPaths(job, first);
    // Configure the distributed cache for the job
    job.addCacheFile(
        new URI(first.toUri().toString() + "#distributed.first.symlink"));
    job.addFileToClassPath(second);
    job.addFileToClassPath(APP_JAR); // The AppMaster jar itself.
    job.addArchiveToClassPath(third);
    job.addCacheArchive(fourth.toUri());
    job.setMaxMapAttempts(1); // speed up failures

    job.submit();
View Full Code Here

    int numReduces = sleepConf.getInt("TestMRJobs.testSleepJob.reduces", 2); // or sleepConf.getConfig().getInt(MRJobConfig.NUM_REDUCES, 2);
  
    // job with 3 maps (10s) and numReduces reduces (5s), 1 "record" each:
    Job job = sleepJob.createJob(3, numReduces, 10000, 1, 5000, 1);

    job.addFileToClassPath(APP_JAR); // The AppMaster jar itself.
    job.setJarByClass(SleepJob.class);
    job.setMaxMapAttempts(1); // speed up failures
    job.submit();
    String trackingUrl = job.getTrackingURL();
    String jobId = job.getJobID().toString();
View Full Code Here

    Job job = randomWriterJob.createJob(mrCluster.getConfig());
    Path outputDir =
        new Path(mrCluster.getTestWorkDir().getAbsolutePath(), "random-output");
    FileOutputFormat.setOutputPath(job, outputDir);
    job.setSpeculativeExecution(false);
    job.addFileToClassPath(APP_JAR); // The AppMaster jar itself.
    job.setJarByClass(RandomTextWriterJob.class);
    job.setMaxMapAttempts(1); // speed up failures
    job.submit();
    String trackingUrl = job.getTrackingURL();
    String jobId = job.getJobID().toString();
View Full Code Here
