Package org.apache.hadoop.mapred

Examples of org.apache.hadoop.mapred.Counters$Group


        JobID mapRedJobID = job.getAssignedJobID();
        RunningJob runningJob = null;
        try {
            runningJob = jobClient.getJob(mapRedJobID);
            if(runningJob != null) {
                Counters counters = runningJob.getCounters();
                if (counters == null) {
                    Long nullCounterCount = aggMap.get(PigWarning.NULL_COUNTER_COUNT);
                    nullCounterCount = (nullCounterCount == null) ? 1L : nullCounterCount + 1;
                    aggMap.put(PigWarning.NULL_COUNTER_COUNT, nullCounterCount);
                }
                try {
                    for (Enum e : PigWarning.values()) {
                        if (e != PigWarning.NULL_COUNTER_COUNT) {
                            Long currentCount = aggMap.get(e);
                            currentCount = (currentCount == null ? 0 : currentCount);
                            // Counters should never be null here; a null value
                            // indicates a Hadoop bug (see PIG-943). When it does
                            // happen, we skip the addition so the user is told,
                            // via NULL_COUNTER_COUNT, that the warning
                            // aggregation totals may be incomplete. Once the
                            // Hadoop bug is fixed, this check should never trigger.
                            if (counters != null) {
                                currentCount += counters.getCounter(e);
                            }
                            aggMap.put(e, currentCount);
                        }
                    }
                } catch (Exception e) {
                    log.warn("Exception getting counters.", e);
                    // ...
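
The excerpt above defends against RunningJob.getCounters() returning null (PIG-943). The same defensive read, reduced to a self-contained sketch; the JobConf and the job id string are assumptions, not values from the excerpt:

import java.io.IOException;
import org.apache.hadoop.mapred.Counters;
import org.apache.hadoop.mapred.JobClient;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.JobID;
import org.apache.hadoop.mapred.RunningJob;

public class CounterRead {
    // Returns the counter's value, or -1 when the job or its counters
    // cannot be retrieved (see PIG-943 for null counters).
    static long readCounter(JobConf conf, String jobIdStr, Enum<?> key)
            throws IOException {
        JobClient jobClient = new JobClient(conf);
        RunningJob runningJob = jobClient.getJob(JobID.forName(jobIdStr));
        if (runningJob == null) {
            return -1;                  // job unknown to the cluster
        }
        Counters counters = runningJob.getCounters();
        if (counters == null) {
            return -1;                  // defensive: counters may be null
        }
        return counters.getCounter(key);
    }
}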


     * @param operationID unique operation identifier; after the values are
     * collected on global counters (POCounter), they are passed to PORank
     * through the configuration, keyed by this identifier
     */
    private void saveCounters(Job job, String operationID) {
        Counters counters;
        Group groupCounters;

        Long previousValue = 0L;
        Long previousSum = 0L;
        ArrayList<Pair<String,Long>> counterPairs;

        try {
            counters = HadoopShims.getCounters(job);
            groupCounters = counters.getGroup(getGroupName(counters.getGroupNames()));

            Iterator<Counter> it = groupCounters.iterator();
            HashMap<Integer,Long> counterList = new HashMap<Integer, Long>();

            while(it.hasNext()) {
            // ...
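
Counters.Group is Iterable over its Counter entries, which is what the while(it.hasNext()) loop above walks. A minimal sketch that drains one group into a map; the group name is a placeholder:

import java.util.HashMap;
import java.util.Map;
import org.apache.hadoop.mapred.Counters;
import org.apache.hadoop.mapred.Counters.Counter;
import org.apache.hadoop.mapred.Counters.Group;

public class GroupToMap {
    // Copy every counter in one group into a name -> value map.
    static Map<String, Long> toMap(Counters counters, String groupName) {
        Group group = counters.getGroup(groupName);  // created empty if absent
        Map<String, Long> values = new HashMap<String, Long>();
        for (Counter c : group) {
            values.put(c.getName(), c.getValue());
        }
        return values;
    }
}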

    public static long getDefaultBlockSize(FileSystem fs, Path path) {
        return fs.getDefaultBlockSize(path);
    }

    public static Counters getCounters(Job job) throws IOException, InterruptedException {
        return new Counters(job.getJob().getCounters());
    }
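This shim works because org.apache.hadoop.mapred.Counters has a constructor that wraps the new-API org.apache.hadoop.mapreduce.Counters, as the return statement above shows. A sketch of the same bridge in isolation, assuming a finished new-API Job:

import java.io.IOException;
import org.apache.hadoop.mapred.Counters;
import org.apache.hadoop.mapreduce.Job;

public class CounterBridge {
    // Wrap new-API counters so that old-API consumers keep working.
    static Counters bridge(Job job) throws IOException {
        org.apache.hadoop.mapreduce.Counters newCounters = job.getCounters();
        return new Counters(newCounters);
    }
}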

   */
  protected boolean checkFatalErrors(TaskHandle t, StringBuilder errMsg) {
    ExecDriverTaskHandle th = (ExecDriverTaskHandle) t;
    RunningJob rj = th.getRunningJob();
    try {
      Counters ctrs = th.getCounters();
      for (Operator<? extends Serializable> op : work.getAliasToWork().values()) {
        if (op.checkFatalErrors(ctrs, errMsg)) {
          return true;
        }
      }
      // ...
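
checkFatalErrors here fans out to each operator; under the hood such checks usually reduce to reading a counter and comparing it against a threshold. A generic sketch of that idea, with hypothetical group and counter names (not Hive's):

import org.apache.hadoop.mapred.Counters;

public class FatalCheck {
    // Hypothetical names; real operators use their own counter enums.
    static final String GROUP = "MyApp";
    static final String FATAL = "FATAL_ERRORS";

    static boolean hasFatalErrors(Counters ctrs, StringBuilder errMsg) {
        if (ctrs == null) {
            return false;                 // counters unavailable: nothing to check
        }
        long fatal = ctrs.findCounter(GROUP, FATAL).getValue();
        if (fatal > 0) {
            errMsg.append(fatal).append(" fatal error(s) recorded");
            return true;
        }
        return false;
    }
}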

     * Parse and add the job counters
     */
    @SuppressWarnings("deprecation")
    private static void parseAndAddJobCounters(Map<String, String> job, String counters) {
        try {
            Counters counterGroups = Counters.fromEscapedCompactString(counters);
            for (Group otherGroup : counterGroups) {
                Group group = counterGroups.getGroup(otherGroup.getName());
                for (Counter otherCounter : otherGroup) {
                    Counter counter = group.getCounterForName(otherCounter.getName());
                    job.put(otherCounter.getName(), String.valueOf(counter.getValue()));
                }
            }
            // ...
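
fromEscapedCompactString is the inverse of Counters.makeEscapedCompactString(), the format in which counters were serialized into job-history files. A round-trip sketch with made-up group and counter names:

import org.apache.hadoop.mapred.Counters;

public class CompactRoundTrip {
    public static void main(String[] args) throws Exception {
        Counters original = new Counters();
        original.incrCounter("demo-group", "demo-counter", 42L);

        String compact = original.makeEscapedCompactString();   // serialize
        Counters parsed = Counters.fromEscapedCompactString(compact);

        long value = parsed.getGroup("demo-group")
                           .getCounterForName("demo-counter").getValue();
        System.out.println(value);   // 42
    }
}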

                    case FINISH_TIME:
                        endTime = Long.valueOf(val);
                        break;                   
                    case COUNTERS: {
                        try {
                            Counters counters = Counters.fromEscapedCompactString(val);
                            long rows = counters.getGroup(TASK_COUNTER_GROUP)
                                    .getCounterForName(MAP_INPUT_RECORDS).getCounter();
                            if (rows < minMapRows) minMapRows = rows;
                            if (rows > maxMapRows) maxMapRows = rows;
                        } catch (ParseException e) {
                            LOG.warn("Failed to parse job counters", e);
                        }
                    }
                    break;
                    default:
                        LOG.warn("JobHistory.Keys." + key
                                + " : NOT INCLUDED IN PERFORMANCE ADVISOR MAP COUNTERS");
                        break;
                    }
                }
                duration = endTime - startTime;
                if (minMapTime > duration) minMapTime = duration;
                if (maxMapTime < duration) maxMapTime = duration;
                totalMapTime += duration;       
            } else if (task.get(Keys.TASK_TYPE).equals("REDUCE")) {
                Map<JobHistory.Keys, String> reduceTask = task.getValues();
                Map<JobHistory.Keys, String> successTaskAttemptMap  =  getLastSuccessfulTaskAttempt(task);
                // NOTE: skipping unsuccessful task attempts means fewer tasks end up in the task list
                if (successTaskAttemptMap != null) {
                    reduceTask.putAll(successTaskAttemptMap);
                } else {
                    LOG.warn("Task:<" + task.get(Keys.TASKID) + "> is not successful - SKIPPING");
                }
                long duration = 0;
                long startTime = 0;
                long endTime = 0;
                int size = reduceTask.size();
                numberReduces++;

                Iterator<Map.Entry<JobHistory.Keys, String>> kv = reduceTask.entrySet().iterator();
                for (int j = 0; j < size; j++) {
                    Map.Entry<JobHistory.Keys, String> rtc = kv.next();
                    JobHistory.Keys key = rtc.getKey();
                    String val = rtc.getValue();
                    switch (key) {
                    case START_TIME:
                        startTime = Long.valueOf(val);
                        break;
                    case FINISH_TIME:
                        endTime = Long.valueOf(val);
                        break;
                    case COUNTERS: {
                        try {
                            Counters counters = Counters.fromEscapedCompactString(val);
                            long rows = counters.getGroup(TASK_COUNTER_GROUP)
                                    .getCounterForName(REDUCE_INPUT_RECORDS).getCounter();
                            if (rows < minReduceRows) minReduceRows = rows;
                            if (rows > maxReduceRows) maxReduceRows = rows;
                        } catch (ParseException e) {
                            LOG.warn("Failed to parse job counters", e);
                            // ...
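
Both the map and reduce branches read one task counter out of the parsed Counters: a group fetch followed by a name fetch. Isolated below; the constants are assumed to resolve to the classic Hadoop 1.x Task$Counter group, as the surrounding code suggests:

import org.apache.hadoop.mapred.Counters;

public class TaskRows {
    // Hadoop 1.x-era task counter group name (assumed).
    static final String TASK_COUNTER_GROUP = "org.apache.hadoop.mapred.Task$Counter";

    static long inputRecords(Counters counters, boolean isMap) {
        String name = isMap ? "MAP_INPUT_RECORDS" : "REDUCE_INPUT_RECORDS";
        return counters.getGroup(TASK_COUNTER_GROUP)
                       .getCounterForName(name).getCounter();
    }
}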

            }
        }
    }

    private Counters convertToHadoopCounters(TezCounters tezCounters) {
        Counters counters = new Counters();
        for (CounterGroup tezGrp : tezCounters) {
            Group grp = counters.addGroup(tezGrp.getName(), tezGrp.getDisplayName());
            for (TezCounter counter : tezGrp) {
                grp.addCounter(counter.getName(), counter.getDisplayName(), counter.getValue());
            }
        }
        return counters;
    }
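
A quick way to sanity-check a conversion like this is to build a Counters instance by hand through the same addGroup/addCounter calls and read it back. A self-contained sketch with arbitrary names:

import org.apache.hadoop.mapred.Counters;
import org.apache.hadoop.mapred.Counters.Group;

public class BuildAndReadBack {
    public static void main(String[] args) {
        Counters counters = new Counters();
        Group grp = counters.addGroup("tez-group", "Tez Group");
        grp.addCounter("records", "Records", 100L);

        long v = counters.getGroup("tez-group")
                         .getCounterForName("records").getValue();
        System.out.println(v);   // 100
    }
}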

    }

    @SuppressWarnings("deprecation")
    void computeWarningAggregate(Job job, Map<Enum, Long> aggMap) {
        try {
            Counters counters = HadoopShims.getCounters(job);
            if (counters == null) {
                Long nullCounterCount = aggMap.get(PigWarning.NULL_COUNTER_COUNT);
                nullCounterCount = (nullCounterCount == null) ? 1L : nullCounterCount + 1;
                aggMap.put(PigWarning.NULL_COUNTER_COUNT, nullCounterCount);
            }
            for (Enum e : PigWarning.values()) {
                if (e != PigWarning.NULL_COUNTER_COUNT) {
                    Long currentCount = aggMap.get(e);
                    currentCount = (currentCount == null ? 0 : currentCount);
                    // Counters should never be null here; a null value
                    // indicates a Hadoop bug (see PIG-943). When it does
                    // happen, we skip the addition so the user is told,
                    // via NULL_COUNTER_COUNT, that the warning
                    // aggregation totals may be incomplete. Once the
                    // Hadoop bug is fixed, this check should never trigger.
                    if (counters != null) {
                        currentCount += counters.getCounter(e);
                    }
                    aggMap.put(e, currentCount);
                }
            }
        } catch (Exception e) {
            String msg = "Unable to retrieve job to compute warning aggregation.";
            log.warn(msg, e);
        }
    }
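
On Java 8 and later, the null-checking around aggMap collapses into Map.merge. A sketch of the same aggregation pattern (a simplification, not the Pig code):

import java.util.Map;
import org.apache.hadoop.mapred.Counters;

public class WarningAggregate {
    // Null-safe aggregation of enum-keyed counters into a running map.
    static <E extends Enum<E>> void aggregate(Counters counters,
                                              Iterable<E> keys,
                                              Map<Enum<?>, Long> aggMap) {
        if (counters == null) {
            return;  // caller should bump its own null-counter tally instead
        }
        for (E e : keys) {
            aggMap.merge(e, counters.getCounter(e), Long::sum);
        }
    }
}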

    public static Counters getCounters(Job job) throws IOException {
        try {
            Cluster cluster = new Cluster(job.getJobConf());
            org.apache.hadoop.mapreduce.Job mrJob = cluster.getJob(job.getAssignedJobID());
            if (mrJob == null) { // In local mode, mrJob will be null
                mrJob = job.getJob();
            }
            return new Counters(mrJob.getCounters());
        } catch (Exception ir) {
            throw new IOException(ir);
        }
    }
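The Cluster lookup used above also works on its own. A standalone sketch that fetches counters for a known job id; the Configuration and the job id string are assumptions:

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.mapreduce.Cluster;
import org.apache.hadoop.mapreduce.Counters;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.JobID;

public class ClusterCounters {
    static Counters fetch(Configuration conf, String jobIdStr)
            throws IOException, InterruptedException {
        Cluster cluster = new Cluster(conf);
        Job mrJob = cluster.getJob(JobID.forName(jobIdStr));
        if (mrJob == null) {
            return null;   // e.g. local mode, or the job is unknown
        }
        return mrJob.getCounters();
    }
}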

      // let the job retry several times, which eventually leads to failure.
      if (fatal) {
        continue; // wait until rj.isComplete
      }

      Counters ctrs = th.getCounters();

      // Note: intentional assignment in the condition, not a comparison.
      if (fatal = checkFatalErrors(ctrs, errMsg)) {
        console.printError("[Fatal Error] " + errMsg.toString() + ". Killing the job.");
        rj.killJob();
        continue;
      }
      errMsg.setLength(0);

      updateCounters(ctrs, rj);

      // Prepare data for Client Stat Publishers (if any present) and execute them
      if (clientStatPublishers.size() > 0 && ctrs != null) {
        Map<String, Double> extractedCounters = extractAllCounterValues(ctrs);
        for (ClientStatsPublisher clientStatPublisher : clientStatPublishers) {
          try {
            clientStatPublisher.run(extractedCounters, rj.getID().toString());
          } catch (RuntimeException runtimeException) {
            LOG.error("Exception " + runtimeException.getClass().getCanonicalName()
                + " thrown when running clientStatsPublishers. The stack trace is: ",
                runtimeException);
          }
        }
      }

      String report = " " + getId() + " map = " + mapProgress + "%,  reduce = " + reduceProgress
          + "%";


      if (!report.equals(lastReport)
          || System.currentTimeMillis() >= reportTime + maxReportInterval) {
        // Find the cumulative CPU msec; if the counter cannot be found,
        // skip printing it.
        if (ctrs != null) {
          Counter counterCpuMsec = ctrs.findCounter("org.apache.hadoop.mapred.Task$Counter",
              "CPU_MILLISECONDS");
          if (counterCpuMsec != null) {
            long newCpuMSec = counterCpuMsec.getValue();
            if (newCpuMSec > 0) {
              cpuMsec = newCpuMSec;
              report += ", Cumulative CPU "
                + (cpuMsec / 1000D) + " sec";
            }
          }
        }

        // write out serialized plan with counters to log file
        // LOG.info(queryPlan);
        String output = dateFormat.format(Calendar.getInstance().getTime()) + report;
        SessionState ss = SessionState.get();
        if (ss != null) {
          ss.getHiveHistory().setTaskCounters(SessionState.get().getQueryId(), getId(), ctrs);
          ss.getHiveHistory().setTaskProperty(SessionState.get().getQueryId(), getId(),
              Keys.TASK_HADOOP_PROGRESS, output);
          ss.getHiveHistory().progressTask(SessionState.get().getQueryId(), this.task);
          this.callBackObj.logPlanProgress(ss);
        }
        console.printInfo(output);
        lastReport = report;
        reportTime = System.currentTimeMillis();
      }
    }

    if (cpuMsec > 0) {
      console.printInfo("MapReduce Total cumulative CPU time: "
          + Utilities.formatMsecToStr(cpuMsec));
    }

    boolean success;

    Counters ctrs = th.getCounters();
    if (fatal) {
      success = false;
    } else {
      // check for fatal error again in case it occurred after
      // the last check before the job is completed
      if (checkFatalErrors(ctrs, errMsg)) {
        console.printError("[Fatal Error] " + errMsg.toString());
        success = false;
      } else {
        success = rj.isSuccessful();
      }
    }

    if (ctrs != null) {
      Counter counterCpuMsec = ctrs.findCounter("org.apache.hadoop.mapred.Task$Counter",
          "CPU_MILLISECONDS");
      if (counterCpuMsec != null) {
        long newCpuMSec = counterCpuMsec.getValue();
        if (newCpuMSec > cpuMsec) {
          cpuMsec = newCpuMSec;
          // ...
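
The CPU_MILLISECONDS lookup by group and counter name appears twice in this excerpt and is easy to isolate into a null-safe helper. A sketch:

import org.apache.hadoop.mapred.Counters;
import org.apache.hadoop.mapred.Counters.Counter;

public class CpuMsec {
    // Returns cumulative CPU milliseconds, or -1 when unavailable.
    static long cpuMillis(Counters ctrs) {
        if (ctrs == null) {
            return -1;
        }
        Counter c = ctrs.findCounter("org.apache.hadoop.mapred.Task$Counter",
                                     "CPU_MILLISECONDS");
        return (c == null) ? -1 : c.getValue();
    }
}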
