Package org.apache.hadoop.mapred

Examples of org.apache.hadoop.mapred.RunningJob
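For orientation, here is a minimal, self-contained sketch (not taken from any of the projects excerpted below) of the typical RunningJob lifecycle in the old org.apache.hadoop.mapred API: build a JobConf, submit it through a JobClient, poll the returned RunningJob for progress, and check the final status. The job name and the input/output paths are placeholders.

    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.mapred.FileInputFormat;
    import org.apache.hadoop.mapred.FileOutputFormat;
    import org.apache.hadoop.mapred.JobClient;
    import org.apache.hadoop.mapred.JobConf;
    import org.apache.hadoop.mapred.RunningJob;

    public class RunningJobSketch {
        public static void main(String[] args) throws Exception {
            JobConf conf = new JobConf(RunningJobSketch.class);
            conf.setJobName("running-job-sketch");
            // Placeholder paths; point these at real HDFS locations.
            FileInputFormat.setInputPaths(conf, new Path("/tmp/input"));
            FileOutputFormat.setOutputPath(conf, new Path("/tmp/output"));

            JobClient jobClient = new JobClient(conf);
            RunningJob job = jobClient.submitJob(conf); // returns immediately

            // Poll until the job finishes, printing map/reduce progress.
            while (!job.isComplete()) {
                System.out.println("map " + (int) (job.mapProgress() * 100)
                        + "% reduce " + (int) (job.reduceProgress() * 100) + "%");
                Thread.sleep(1000);
            }
            System.out.println("Job " + job.getID()
                    + (job.isSuccessful() ? " succeeded" : " failed"));
        }
    }

The excerpts below show the same RunningJob handle being used to kill jobs, read counters, monitor progress, and pull task reports.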


    protected void processKill(String jobid) throws IOException
    {
        if (mJobConf != null) {
            JobClient jc = new JobClient(mJobConf);
            JobID id = JobID.forName(jobid);
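            // getJob() returns null when the JobTracker no longer knows about this id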
            RunningJob job = jc.getJob(id);
            if (job == null)
                System.out.println("Job with id " + jobid + " is not active");
            else
            {   
                job.killJob();
                log.info("Kill " + id + " submitted.");
            }
        }
    }
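A hedged variant of the kill path above (hypothetical, reusing jc and jobid from that snippet): RunningJob.getJobState() can be checked first so that only jobs the JobTracker still reports as running get killed; JobStatus.RUNNING is one of the int state constants in org.apache.hadoop.mapred.JobStatus.

        RunningJob job = jc.getJob(JobID.forName(jobid));
        if (job != null && job.getJobState() == JobStatus.RUNNING) {
            job.killJob(); // asks the JobTracker to kill the whole job
        }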


            }

            logger.info("Number of chunks: " + numChunks + ", number of reducers: " + numReducers
                        + ", save keys: " + saveKeys + ", reducerPerBucket: " + reducerPerBucket);
            logger.info("Building store...");
            RunningJob job = JobClient.runJob(conf);

            // Once the job has completed log the counter
            Counters counters = job.getCounters();

            if(saveKeys) {
                if(reducerPerBucket) {
                    logger.info("Number of collisions in the job - "
                                + counters.getCounter(KeyValueWriter.CollisionCounter.NUM_COLLISIONS));
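The Voldemort snippet above reads a job-specific enum counter; as a more generic, hedged sketch (not part of that code), every counter of a completed RunningJob can be walked group by group through the old-API Counters iterator:

            Counters counters = job.getCounters();
            for (Counters.Group group : counters) {
                for (Counters.Counter counter : group) {
                    logger.info(group.getDisplayName() + ": "
                                + counter.getDisplayName() + " = " + counter.getCounter());
                }
            }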

    try {
      jobClient = new JobClient(new InetSocketAddress("localhost", 8021), conf);
      jobClient.setConf(conf); // Bug in constructor, doesn't set conf.
 
      for (JobStatus js : jobClient.getAllJobs()) {
        RunningJob rj = jobClient.getJob(js.getJobID());
        if (rj == null)
          continue;
        String jobName = rj.getJobName();
        if (jobName == null)
          continue;
        //extract TP$imageHash$imageId$step from jobName - we filter on image hash
        if (jobName.startsWith("TP") && !jobName.contains("_TEST")) {
          String[] names = jobName.split("\\$");

    }

    protected void processKill(String jobid) throws IOException
    {
        if (mJobClient != null) {
            RunningJob job = mJobClient.getJob(jobid);
            if (job == null) {
                StringBuilder sb = new StringBuilder();
                sb.append("Job with id ");
                sb.append(jobid);
                sb.append(" is not active");
                System.out.println(sb.toString());
            } else {
                job.killJob();
                log.error("kill submited.");
            }
        }
    }

            //
            // Now, actually submit the job (using the submit name)
            //
            JobClient jobClient = execEngine.getJobClient();
            RunningJob status = jobClient.submitJob(conf);
            log.debug("submitted job: " + status.getJobID());
           
            long sleepTime = 1000;
            double lastQueryProgress = -1.0;
            int lastJobsQueued = -1;
            double lastMapProgress = -1.0;
            double lastReduceProgress = -1.0;
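            // Poll the submitted job once per second, logging map/reduce and overall query progress until it completes.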
            while (true) {
                try {
                    Thread.sleep(sleepTime);
                } catch (Exception e) {}
                   
                    if (status.isComplete()) {
                        success = status.isSuccessful();
                        if (log.isDebugEnabled()) {
                            StringBuilder sb = new StringBuilder();
                            sb.append("Job finished ");
                            sb.append((success ? "" : "un"));
                            sb.append("successfully");
                            log.debug(sb.toString());
                        }
                        if (success) {
                            mrJobNumber++;
                        }
                        double queryProgress = ((double) mrJobNumber) / ((double) numMRJobs);
                        if (queryProgress > lastQueryProgress) {
                            if (log.isInfoEnabled()) {
                                StringBuilder sbProgress = new StringBuilder();
                                sbProgress.append("Pig progress = ");
                                sbProgress.append(((int) (queryProgress * 100)));
                                sbProgress.append("%");
                                log.info(sbProgress.toString());
                            }
                            lastQueryProgress = queryProgress;
                        }
                        break;
                    }
                    else // still running
                    {
                        double mapProgress = status.mapProgress();
                        double reduceProgress = status.reduceProgress();
                        if (lastMapProgress != mapProgress || lastReduceProgress != reduceProgress) {
                            if (log.isDebugEnabled()) {
                                StringBuilder sbProgress = new StringBuilder();
                                sbProgress.append("Hadoop job progress: Map=");
                                sbProgress.append((int) (mapProgress * 100));
                                sbProgress.append("% Reduce=");
                                sbProgress.append((int) (reduceProgress * 100));
                                sbProgress.append("%");
                                log.debug(sbProgress.toString());
                            }
                            lastMapProgress = mapProgress;
                            lastReduceProgress = reduceProgress;
                        }
                        double numJobsCompleted = mrJobNumber;
                        double thisJobProgress = (mapProgress + reduceProgress) / 2.0;
                        double queryProgress = (numJobsCompleted + thisJobProgress) / ((double) numMRJobs);
                        if (queryProgress > lastQueryProgress) {
                            if (log.isInfoEnabled()) {
                                StringBuilder sbProgress = new StringBuilder();
                                sbProgress.append("Pig progress = ");
                                sbProgress.append(((int) (queryProgress * 100)));
                                sbProgress.append("%");
                                log.info(sbProgress.toString());
                            }
                            lastQueryProgress = queryProgress;
                        }
                    }
            }

            // bug 1030028: if the input file is empty, Hadoop doesn't create the output file!
            Path outputFile = conf.getOutputPath();
            String outputName = outputFile.getName();
            int colon = outputName.indexOf(':');
            if (colon != -1) {
                outputFile = new Path(outputFile.getParent(), outputName.substring(0, colon));
            }
               
            try {
                ElementDescriptor descriptor =
                    ((HDataStorage)(pom.pigContext.getDfs())).asElement(outputFile.toString());

                if (success && !descriptor.exists()) {
                       
                    // create an empty output file
                    PigFile f = new PigFile(outputFile.toString(), false);
                    f.store(BagFactory.getInstance().newDefaultBag(),
                            new PigStorage(),
                            pom.pigContext);
                }
            }
            catch (DataStorageException e) {
                throw WrappedIOException.wrap("Failed to obtain descriptor for " + outputFile.toString(), e);
            }

            if (!success) {
                // go find the error messages
                getErrorMessages(jobClient.getMapTaskReports(status.getJobID()),
                        "map");
                getErrorMessages(jobClient.getReduceTaskReports(status.getJobID()),
                        "reduce");
            }
            else {
                long timeSpent = 0;
             
                // NOTE: this call is crashing due to a bug in Hadoop; the bug is known and the patch has not been applied yet.
                TaskReport[] mapReports = jobClient.getMapTaskReports(status.getJobID());
                TaskReport[] reduceReports = jobClient.getReduceTaskReports(status.getJobID());
                for (TaskReport r : mapReports) {
                    timeSpent += (r.getFinishTime() - r.getStartTime());
                }
                for (TaskReport r : reduceReports) {
                    timeSpent += (r.getFinishTime() - r.getStartTime());

      jc.setInputFormat(BitPostingIndexInputFormat.class);
      jc.setOutputFormat(NullOutputFormat.class);
      BitPostingIndexInputFormat.setStructures(jc, sourceStructureName, sourceLookupStructureName);
      HadoopUtility.toHConfiguration(index, jc);
     
      RunningJob rj = JobClient.runJob(jc);
      JobID jobId = rj.getID();
      HadoopUtility.finishTerrierJob(jc);
      if (! rj.isSuccessful())
      {
        throw new Exception("Could not complete job");
      }
      //logger.info("Inv2DirectMultiReduce MR job "+ jobId.toString() + " is completed, now finishing");
    }
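Note on the two submission styles used across these examples: JobClient.runJob(JobConf), as in the Terrier snippet above, blocks until the job finishes, printing progress along the way and throwing an IOException if the job fails, while submitJob(JobConf), as in the Pig and distcp snippets, returns a RunningJob immediately and leaves polling, killing, and error handling to the caller.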

    FileOutputFormat.setOutputPath(conf, new Path(index.getPath()));
    HadoopUtility.toHConfiguration(index, conf);
   
    conf.setOutputFormat(NullOutputFormat.class);
    try{
      RunningJob rj = JobClient.runJob(conf);
      rj.getID();
      HadoopUtility.finishTerrierJob(conf);
    } catch (Exception e) {
      throw new Exception("Problem running job to reverse metadata", e);
    }
    // only update the index from the controlling process, so that we don't have locking/concurrency issues

    }
   
    JobID jobId = null;
    boolean ranOK = true;
    try{
      RunningJob rj = JobClient.runJob(conf);
      jobId = rj.getID();
      HadoopUtility.finishTerrierJob(conf);
    } catch (Exception e) {
      logger.error("Problem running job", e);
      ranOK = false;
    }

    DistCopier copier = getCopier(conf, args);
   
    if (copier != null) {
      try {
        JobClient client = copier.getJobClient();
        RunningJob job = client.submitJob(copier.getJobConf());
        try {
          if (!client.monitorAndPrintJob(copier.getJobConf(), job)) {
            throw new IOException("Job failed!");
          }
        } catch (InterruptedException ie) {
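JobClient.monitorAndPrintJob(JobConf, RunningJob) used above blocks until the submitted job completes, printing its progress and counters, and returns true only if the job succeeded, which is why a false return is turned into an IOException here.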

        }
        fs.setReplication(opList, OP_LIST_REPLICATION); // increase replication for control file
      }

      jobConf.setInt(OP_COUNT_LABEL, opCount);
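      // submitJob() returns immediately; keep the RunningJob handle so completion can be checked later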
      RunningJob rJob = jClient.submitJob(jobConf);
      JobContext ctx = new JobContext(rJob, jobConf);
      submitted.add(ctx);
    } while (!done);

    return submitted;
