Package org.apache.hadoop.mapred

Examples of org.apache.hadoop.mapred.Counters
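
The snippets on this page come from Hive, Pig, and MRUnit. As a minimal, self-contained sketch of the common entry point, not taken from any of them: fetching a finished job's Counters through JobClient. The job ID, configuration, and counter enum below are hypothetical placeholders.

    import java.io.IOException;
    import org.apache.hadoop.mapred.Counters;
    import org.apache.hadoop.mapred.JobClient;
    import org.apache.hadoop.mapred.JobConf;
    import org.apache.hadoop.mapred.JobID;
    import org.apache.hadoop.mapred.RunningJob;

    public class CounterReader {
        // Hypothetical application-defined counter.
        public enum MyCounter { RECORDS_SEEN }

        public static long readCounter(JobConf conf, JobID jobId) throws IOException {
            JobClient jobClient = new JobClient(conf);
            RunningJob job = jobClient.getJob(jobId);
            if (job == null) {
                return -1; // the job is unknown to the JobTracker
            }
            Counters counters = job.getCounters();
            // getCounters() can come back null (see the PIG-943 comments below).
            return (counters == null) ? -1 : counters.getCounter(MyCounter.RECORDS_SEEN);
        }
    }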


    RunningJob rj = th.getRunningJob();
    // Record map/reduce progress as whole percentages under pseudo-counter keys.
    this.mapProgress = Math.round(rj.mapProgress() * 100);
    this.reduceProgress = Math.round(rj.reduceProgress() * 100);
    taskCounters.put("CNTR_NAME_" + getId() + "_MAP_PROGRESS", Long.valueOf(this.mapProgress));
    taskCounters.put("CNTR_NAME_" + getId() + "_REDUCE_PROGRESS", Long.valueOf(this.reduceProgress));
    // Let every operator in the plan fold its runtime stats into the job's counters.
    Counters ctrs = th.getCounters();
    for (Operator<? extends Serializable> op : work.getAliasToWork().values()) {
      op.updateCounters(ctrs);
    }
    if (work.getReducer() != null) {
      work.getReducer().updateCounters(ctrs);
    }
View Full Code Here
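
Distilled from the Hive snippet above, a sketch of the same progress bookkeeping; taskId stands in for Hive's getId() and the map for its taskCounters field.

    import java.io.IOException;
    import java.util.HashMap;
    import java.util.Map;
    import org.apache.hadoop.mapred.RunningJob;

    public class ProgressRecorder {
        private final Map<String, Long> taskCounters = new HashMap<String, Long>();

        public void record(String taskId, RunningJob rj) throws IOException {
            // RunningJob reports progress as a float in [0, 1]; store it as a
            // whole percentage under pseudo-counter keys.
            long mapProgress = Math.round(rj.mapProgress() * 100);
            long reduceProgress = Math.round(rj.reduceProgress() * 100);
            taskCounters.put("CNTR_NAME_" + taskId + "_MAP_PROGRESS", mapProgress);
            taskCounters.put("CNTR_NAME_" + taskId + "_REDUCE_PROGRESS", reduceProgress);
        }
    }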


        JobID mapRedJobID = job.getAssignedJobID();
        RunningJob runningJob = null;
        try {
            runningJob = jobClient.getJob(mapRedJobID);
            if(runningJob != null) {
                Counters counters = runningJob.getCounters();
                if (counters == null) {
                    // Track how many jobs came back with null counters so the
                    // user can be warned that the totals may be incomplete.
                    Long previous = aggMap.get(PigWarning.NULL_COUNTER_COUNT);
                    long nullCounterCount = (previous == null) ? 0 : previous;
                    nullCounterCount++;
                    aggMap.put(PigWarning.NULL_COUNTER_COUNT, nullCounterCount);
                }
                for (Enum e : PigWarning.values()) {
                    if (e != PigWarning.NULL_COUNTER_COUNT) {
                        Long currentCount = aggMap.get(e);
                        currentCount = (currentCount == null ? 0 : currentCount);
                        // Counters should never be null; that is a Hadoop bug,
                        // and once it is fixed this null-handling path should
                        // never be hit. See PIG-943.
                        if (counters != null) {
                            currentCount += counters.getCounter(e);
                        }
                        aggMap.put(e, currentCount);
                    }
                }
            }
        } catch (IOException ioe) {
View Full Code Here
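
The aggregation loop above reduces to a few lines. A minimal sketch, with a hypothetical Warning enum standing in for PigWarning:

    import java.util.HashMap;
    import java.util.Map;
    import org.apache.hadoop.mapred.Counters;

    public class WarningAggregator {
        // Hypothetical warning enum in place of PigWarning.
        public enum Warning { DIVIDE_BY_ZERO, NULL_FIELD }

        private final Map<Enum<?>, Long> aggMap = new HashMap<Enum<?>, Long>();

        // Fold one job's warning counters into the running totals; a null
        // Counters object (the PIG-943 case) leaves the totals unchanged.
        public void aggregate(Counters counters) {
            for (Warning w : Warning.values()) {
                Long current = aggMap.get(w);
                long total = (current == null) ? 0 : current;
                if (counters != null) {
                    total += counters.getCounter(w);
                }
                aggMap.put(w, total);
            }
        }
    }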

                } catch (IOException e2) {
                    String error = "Error deserializing plans from the JobConf.";
                    throw new RuntimeException(error, e2);
                }
               
                Counters counters = null;
                try {
                    counters = rj.getCounters();
                    // If counters is null, all the stats are unknown, and -1 is
                    // used to indicate an unknown counter. Counters should never
                    // be null; that is a Hadoop bug, and once it is fixed this
                    // null-handling path should never be hit. See PIG-943.
                    if (counters != null) {
                        Counters.Group taskgroup = counters.getGroup("org.apache.hadoop.mapred.Task$Counter");
                        Counters.Group hdfsgroup = counters.getGroup("org.apache.hadoop.mapred.Task$FileSystemCounter");
                        jobStats.put("PIG_STATS_MAP_INPUT_RECORDS", String.valueOf(taskgroup.getCounterForName("MAP_INPUT_RECORDS").getCounter()));
                        jobStats.put("PIG_STATS_MAP_OUTPUT_RECORDS", String.valueOf(taskgroup.getCounterForName("MAP_OUTPUT_RECORDS").getCounter()));
                        jobStats.put("PIG_STATS_REDUCE_INPUT_RECORDS", String.valueOf(taskgroup.getCounterForName("REDUCE_INPUT_RECORDS").getCounter()));
                        jobStats.put("PIG_STATS_REDUCE_OUTPUT_RECORDS", String.valueOf(taskgroup.getCounterForName("REDUCE_OUTPUT_RECORDS").getCounter()));
                        jobStats.put("PIG_STATS_BYTES_WRITTEN", String.valueOf(hdfsgroup.getCounterForName("HDFS_WRITE").getCounter()));
View Full Code Here
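
Looking a counter up by group and name, as the stats code above does, takes two calls. A minimal sketch using the classic Task$Counter group name from older Hadoop releases:

    import org.apache.hadoop.mapred.Counters;

    public final class TaskStats {
        private TaskStats() {
        }

        public static long mapInputRecords(Counters counters) {
            Counters.Group taskGroup = counters.getGroup("org.apache.hadoop.mapred.Task$Counter");
            // getCounterForName never returns null; an absent counter reads as 0.
            return taskGroup.getCounterForName("MAP_INPUT_RECORDS").getCounter();
        }
    }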

      // let the job retry several times, which eventually leads to failure.
      if (fatal) {
        continue; // wait until rj.isComplete
      }

      Counters ctrs = th.getCounters();

      // Intentional assignment: remember the fatal flag and test it in one step.
      if (fatal = checkFatalErrors(ctrs, errMsg)) {
        console.printError("[Fatal Error] " + errMsg.toString() + ". Killing the job.");
        rj.killJob();
        continue;
      }
      errMsg.setLength(0);

      updateCounters(ctrs, rj);

      // Prepare data for Client Stat Publishers (if any present) and execute them
      if (clientStatPublishers.size() > 0 && ctrs != null) {
        Map<String, Double> extractedCounters = extractAllCounterValues(ctrs);
        for (ClientStatsPublisher clientStatPublisher : clientStatPublishers) {
          try {
            clientStatPublisher.run(extractedCounters, rj.getID().toString());
          } catch (RuntimeException runtimeException) {
            LOG.error("Exception " + runtimeException.getClass().getCanonicalName()
                + " thrown when running clientStatsPublishers. The stack trace is: ",
                runtimeException);
          }
        }
      }

      String report = " " + getId() + " map = " + mapProgress + "%,  reduce = " + reduceProgress
          + "%";

      if (!report.equals(lastReport)
          || System.currentTimeMillis() >= reportTime + maxReportInterval) {
        // find out CPU msecs
        // In the case that we can't find out this number, we just skip the step to print
        // it out.
        if (ctrs != null) {
          Counter counterCpuMsec = ctrs.findCounter("org.apache.hadoop.mapred.Task$Counter",
              "CPU_MILLISECONDS");
          if (counterCpuMsec != null) {
            long newCpuMSec = counterCpuMsec.getValue();
            if (newCpuMSec > 0) {
              cpuMsec = newCpuMSec;
              report += ", Cumulative CPU "
                + (cpuMsec / 1000D) + " sec";
            }
          }
        }

        // write out serialized plan with counters to log file
        // LOG.info(queryPlan);
        String output = dateFormat.format(Calendar.getInstance().getTime()) + report;
        SessionState ss = SessionState.get();
        if (ss != null) {
          ss.getHiveHistory().setTaskCounters(SessionState.get().getQueryId(), getId(), ctrs);
          ss.getHiveHistory().setTaskProperty(SessionState.get().getQueryId(), getId(),
              Keys.TASK_HADOOP_PROGRESS, output);
          if (ss.getConf().getBoolVar(HiveConf.ConfVars.HIVE_LOG_INCREMENTAL_PLAN_PROGRESS)) {
            ss.getHiveHistory().progressTask(SessionState.get().getQueryId(), this.task);
            this.callBackObj.logPlanProgress(ss);
          }
        }
        console.printInfo(output);
        lastReport = report;
        reportTime = System.currentTimeMillis();
      }
    }

    if (cpuMsec > 0) {
      console.printInfo("MapReduce Total cumulative CPU time: "
          + Utilities.formatMsecToStr(cpuMsec));
    }

    boolean success;

    Counters ctrs = th.getCounters();
    if (fatal) {
      success = false;
    } else {
      // check for fatal error again in case it occurred after
      // the last check before the job is completed
      if (checkFatalErrors(ctrs, errMsg)) {
        console.printError("[Fatal Error] " + errMsg.toString());
        success = false;
      } else {
        SessionState ss = SessionState.get();
        if (ss != null) {
          ss.getHiveHistory().setTaskCounters(SessionState.get().getQueryId(), getId(), ctrs);
        }
        success = rj.isSuccessful();
      }
    }

    if (ctrs != null) {
      Counter counterCpuMsec = ctrs.findCounter("org.apache.hadoop.mapred.Task$Counter",
          "CPU_MILLISECONDS");
      if (counterCpuMsec != null) {
        long newCpuMSec = counterCpuMsec.getValue();
        if (newCpuMSec > cpuMsec) {
          cpuMsec = newCpuMSec;
View Full Code Here
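
The CPU-time lookup inside the loop above can be isolated. A minimal sketch that, like the Hive code, guards both the Counters object and the individual counter:

    import org.apache.hadoop.mapred.Counters;
    import org.apache.hadoop.mapred.Counters.Counter;

    public final class CpuTime {
        private CpuTime() {
        }

        // Returns cumulative CPU milliseconds, or -1 when unavailable.
        public static long cpuMsec(Counters ctrs) {
            if (ctrs == null) {
                return -1;
            }
            Counter c = ctrs.findCounter("org.apache.hadoop.mapred.Task$Counter",
                "CPU_MILLISECONDS");
            return (c == null) ? -1 : c.getValue();
        }
    }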

    this();
    setMapper(m);
  }

  public MapDriver() {
    // Seed a fresh Counters instance so tests never observe a null value.
    setCounters(new Counters());
  }
View Full Code Here

  }

  public PipelineMapReduceDriver() {
    mapReducePipeline = new ArrayList<Pair<Mapper, Reducer>>();
    inputList = new ArrayList<Pair<K1, V1>>();
    setCounters(new Counters());
  }
View Full Code Here

    this();
    setReducer(r);
  }

  public ReduceDriver() {
    setCounters(new Counters());
  }
View Full Code Here
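
All three MRUnit drivers above seed an empty Counters in their default constructors, so a test can read counters after a run without a null check. A sketch of that usage; the mapper under test is hypothetical, and the fluent withMapper/withInput calls assume MRUnit's driver API:

    import java.io.IOException;
    import java.util.List;
    import org.apache.hadoop.io.LongWritable;
    import org.apache.hadoop.io.Text;
    import org.apache.hadoop.mapred.Counters;
    import org.apache.hadoop.mrunit.MapDriver;
    import org.apache.hadoop.mrunit.types.Pair;

    public class WordCountMapperTest {
        public void testCountersAreNeverNull() throws IOException {
            MapDriver<LongWritable, Text, Text, LongWritable> driver =
                new MapDriver<LongWritable, Text, Text, LongWritable>()
                    .withMapper(new WordCountMapper())   // hypothetical mapper
                    .withInput(new LongWritable(0), new Text("one fish two fish"));
            List<Pair<Text, LongWritable>> output = driver.run();
            Counters counters = driver.getCounters();  // non-null by construction
        }
    }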

                } catch (IOException e2) {
                    String error = "Error deserializing plans from the JobConf.";
                    throw new RuntimeException(error, e2);
                }
               
                Counters counters = null;
                try {
                    counters = rj.getCounters();
                    // If counters is null, all the stats are unknown, and -1 is
                    // used to indicate an unknown counter. Counters should never
                    // be null; that is a Hadoop bug, and once it is fixed this
                    // null-handling path should never be hit. See PIG-943.
                    if (counters != null) {
                        Counters.Group taskgroup = counters.getGroup("org.apache.hadoop.mapred.Task$Counter");
                        // Newer Hadoop releases expose filesystem counters under the
                        // "FileSystemCounters" group rather than Task$FileSystemCounter.
                        Counters.Group hdfsgroup = counters.getGroup("FileSystemCounters");
                        jobStats.put("PIG_STATS_MAP_INPUT_RECORDS", String.valueOf(taskgroup.getCounterForName("MAP_INPUT_RECORDS").getCounter()));
                        jobStats.put("PIG_STATS_MAP_OUTPUT_RECORDS", String.valueOf(taskgroup.getCounterForName("MAP_OUTPUT_RECORDS").getCounter()));
                        jobStats.put("PIG_STATS_REDUCE_INPUT_RECORDS", String.valueOf(taskgroup.getCounterForName("REDUCE_INPUT_RECORDS").getCounter()));
                        jobStats.put("PIG_STATS_REDUCE_OUTPUT_RECORDS", String.valueOf(taskgroup.getCounterForName("REDUCE_OUTPUT_RECORDS").getCounter()));
                        jobStats.put("PIG_STATS_BYTES_WRITTEN", String.valueOf(hdfsgroup.getCounterForName("HDFS_BYTES_WRITTEN").getCounter()));
View Full Code Here
