Examples of createJobConf()


Examples of org.apache.hadoop.mapred.MiniMRCluster.createJobConf()

      MyFile[] files = createFiles(fs.getUri(), "/srcdat");
      long totsize = 0;
      for (MyFile f : files) {
        totsize += f.getSize();
      }
      Configuration job = mr.createJobConf();
      job.setLong("distcp.bytes.per.map", totsize / 3);
      ToolRunner.run(new DistCp(job),
          new String[] {"-m", "100",
                        "-log",
                        namenode+"/logs",
View Full Code Here

Examples of org.apache.hadoop.mapred.MiniMRCluster.createJobConf()

    try {
      // Retrieve HBASE_TEST_UTILITY.mrCluster via reflection, as it is private.
      Field mrClusterField = HBaseTestingUtility.class.getDeclaredField("mrCluster");
      mrClusterField.setAccessible(true);
      MiniMRCluster mrCluster = (MiniMRCluster) mrClusterField.get(HBASE_TEST_UTILITY);
      JobConf jobConf = mrCluster.createJobConf();
      Configuration conf = HBASE_TEST_UTILITY.getConfiguration();
      String proprety = "mapreduce.jobhistory.address";
      String value = jobConf.get(proprety);
      if (value != null) { // maybe null if we're running MRv1
        conf.set(proprety, value);
View Full Code Here

Examples of org.apache.hadoop.mapred.MiniMRCluster.createJobConf()

      MyFile[] files = createFiles(fs.getUri(), "/srcdat");
      long totsize = 0;
      for (MyFile f : files) {
        totsize += f.getSize();
      }
      JobConf job = mr.createJobConf();
      job.setLong("distcp.bytes.per.map", totsize / 3);
      ToolRunner.run(new DistCp(job),
          new String[] {"-m", "100",
                        "-log",
                        namenode+"/logs",
View Full Code Here

Examples of org.apache.hadoop.mapred.MiniMRCluster.createJobConf()

    RewindableInputStream ris = null;
    ArrayList<String> seenEvents = new ArrayList<String>(10);
    RunningJob rJob = null;
   
    try {
      JobConf jobConf = mrCluster.createJobConf();
      jobConf.setQueueName(queueName);
      // construct a job with 1 map and 1 reduce task.
      rJob = UtilsForTests.runJob(jobConf, inDir, outDir, 1,
                                  1);
      rJob.waitForCompletion();
View Full Code Here

Examples of org.apache.hadoop.mapred.MiniMRCluster.createJobConf()

  public void testWithLocal() throws Exception {
    MiniMRCluster mr = null;
    try {
      mr = new MiniMRCluster(2, "file:///", 3);
      Configuration conf = mr.createJobConf();
      runWordCount(conf);
      runMultiFileWordCount(conf);
    } finally {
      if (mr != null) { mr.shutdown(); }
    }
View Full Code Here

Examples of org.apache.hadoop.mapred.MiniMRCluster.createJobConf()

    conf.setBoolean(JTConfig.JT_PERSIST_JOBSTATUS, false);
    try {
      mr = new MiniMRCluster(1, "file:///", 3, null , null, conf);

      Path inFile = new Path(INPUT_FILE);
      fs = inFile.getFileSystem(mr.createJobConf());
      clean(fs);
      makeInput(fs);
     
      StreamJob job = new StreamJob();
      int failed = job.run(genArgs(mr.getJobTrackerPort()));
View Full Code Here

Examples of org.apache.hadoop.mapred.MiniMRCluster.createJobConf()

      MyFile[] files = createFiles(fs.getUri(), "/srcdat");
      long totsize = 0;
      for (MyFile f : files) {
        totsize += f.getSize();
      }
      Configuration job = mr.createJobConf();
      job.setLong("distcp.bytes.per.map", totsize / 3);
      ToolRunner.run(new DistCp(job),
          new String[] {"-m", "100",
                        "-log",
                        namenode+"/logs",
View Full Code Here

Examples of org.apache.hadoop.mapred.MiniMRCluster.createJobConf()

      String namenode = fileSys.getUri().toString();

      mr  = new MiniMRCluster(1, namenode, 3);

      List<String> args = new ArrayList<String>();
      for (Map.Entry<String, String> entry : mr.createJobConf()) {
        args.add("-jobconf");
        args.add(entry.getKey() + "=" + entry.getValue());
      }

      String argv[] = new String[] {
View Full Code Here

Examples of org.apache.hadoop.mapred.MiniMRCluster.createJobConf()

      FileSystem fileSys = dfs.getFileSystem();
      String namenode = fileSys.getUri().toString();
      mr  = new MiniMRCluster(1, namenode, 3);

      List<String> args = new ArrayList<String>();
      for (Map.Entry<String, String> entry : mr.createJobConf()) {
        args.add("-jobconf");
        args.add(entry.getKey() + "=" + entry.getValue());
      }

      // During tests, the default Configuration will use a local mapred
View Full Code Here

Examples of org.apache.oozie.service.HadoopAccessorService.createJobConf()

            XConfiguration protoActionConf = wps.createProtoActionConf(conf, authToken, true);
            WorkflowLib workflowLib = Services.get().get(WorkflowStoreService.class).getWorkflowLibWithNoDB();

            URI uri = new URI(conf.get(OozieClient.APP_PATH));
            HadoopAccessorService has = Services.get().get(HadoopAccessorService.class);
            Configuration fsConf = has.createJobConf(uri.getAuthority());
            FileSystem fs = has.createFileSystem(wfBean.getUser(), uri, fsConf);

            Path configDefault = null;
            // app path could be a directory
            Path path = new Path(uri.getPath());
View Full Code Here
TOP
Copyright © 2018 www.massapi.com. All rights reserved.
All source code is the property of its respective owners. Java is a trademark of Sun Microsystems, Inc. and is owned by Oracle Inc. Contact coftware#gmail.com.