Class org.apache.hadoop.mapred.Counters

Examples of org.apache.hadoop.mapred.Counters.Counter
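The snippets below are collected from Hive, Hadoop's own JobClient, Avro's tethered MapReduce bridge, and Pig, among others. Almost all of them follow the same pattern: look a Counter up by group name and counter name, guard against nulls, then read its value. As a minimal, self-contained sketch of that pattern (the class and method names here are ours, not taken from any of the snippets):

    import java.io.IOException;
    import org.apache.hadoop.mapred.Counters;
    import org.apache.hadoop.mapred.Counters.Counter;
    import org.apache.hadoop.mapred.RunningJob;

    public final class CounterLookup {
      // Returns the counter's value, or defaultValue when the job exposes
      // no counters (e.g. a retired job) or the counter does not exist.
      static long counterValue(RunningJob job, String group, String name,
          long defaultValue) throws IOException {
        Counters counters = job.getCounters();
        if (counters == null) {
          return defaultValue;
        }
        Counter counter = counters.findCounter(group, name);
        return counter == null ? defaultValue : counter.getValue();
      }
    }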


      // find out CPU msecs; if the counter is unavailable, just skip
      // printing it
      if (ctrs != null) {
        Counter counterCpuMsec = ctrs.findCounter("org.apache.hadoop.mapred.Task$Counter",
            "CPU_MILLISECONDS");
        if (counterCpuMsec != null) {
          long newCpuMSec = counterCpuMsec.getValue();
          if (newCpuMSec > 0) {
            cpuMsec = newCpuMSec;
            report.append(", Cumulative CPU ").append((cpuMsec / 1000D)).append(" sec");
          }
        }
      }

      // write out serialized plan with counters to log file
      // LOG.info(queryPlan);
      String output = report.toString();
      SessionState ss = SessionState.get();
      if (ss != null) {
        ss.getHiveHistory().setTaskCounters(SessionState.get().getQueryId(), getId(), ctrs);
        ss.getHiveHistory().setTaskProperty(SessionState.get().getQueryId(), getId(),
            Keys.TASK_HADOOP_PROGRESS, output);
        if (ss.getConf().getBoolVar(HiveConf.ConfVars.HIVE_LOG_INCREMENTAL_PLAN_PROGRESS)) {
          ss.getHiveHistory().progressTask(SessionState.get().getQueryId(), this.task);
          this.callBackObj.logPlanProgress(ss);
        }
      }
      console.printInfo(output);
      reportTime = System.currentTimeMillis();
    }

    if (cpuMsec > 0) {
      console.printInfo("MapReduce Total cumulative CPU time: "
          + Utilities.formatMsecToStr(cpuMsec));
    }

    boolean success;

    Counters ctrs = th.getCounters();
    if (fatal) {
      success = false;
    } else {
      // check for fatal error again in case it occurred after
      // the last check before the job is completed
      if (checkFatalErrors(ctrs, errMsg)) {
        console.printError("[Fatal Error] " + errMsg.toString());
        success = false;
      } else {
        SessionState ss = SessionState.get();
        if (ss != null) {
          ss.getHiveHistory().setTaskCounters(SessionState.get().getQueryId(), getId(), ctrs);
        }
        success = rj.isSuccessful();
      }
    }

    if (ctrs != null) {
      Counter counterCpuMsec = ctrs.findCounter("org.apache.hadoop.mapred.Task$Counter",
          "CPU_MILLISECONDS");
      if (counterCpuMsec != null) {
        long newCpuMSec = counterCpuMsec.getValue();
        if (newCpuMSec > cpuMsec) {
          cpuMsec = newCpuMSec;
        }
      }
    }
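A hedged aside on the string literals above: the group name "org.apache.hadoop.mapred.Task$Counter" is simply the binary class name of the framework's Task.Counter enum, so the lookup is equivalent to the enum form, assuming that enum is visible in your Hadoop version (avoiding that compile-time dependency is presumably why Hive spells the group out as a string):

    import org.apache.hadoop.mapred.Counters;
    import org.apache.hadoop.mapred.Task;

    public final class CpuCounter {
      // Equivalent enum-based lookup; compiles only where Task.Counter is
      // public, which is exactly the dependency the string form avoids.
      static Counters.Counter find(Counters ctrs) {
        return ctrs.findCounter(Task.Counter.CPU_MILLISECONDS);
      }
    }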


        if (job == null) {
          System.out.println("Could not find job " + jobid);
        } else {
          Counters counters = job.getCounters();
          if (counters == null) {
            // a retired job may no longer expose its counters
            System.out.println("Counters not available for retired job "
                + jobid);
            exitCode = -1;
          } else {
            Group group = counters.getGroup(counterGroupName);
            Counter counter = group.getCounterForName(counterName);
            System.out.println(counter.getCounter());
            exitCode = 0;
          }
        }
      } else if (killJob) {
        RunningJob job = getJob(JobID.forName(jobid));
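The JobClient excerpt above is the code path behind the command line's counter option (hadoop job -counter <job-id> <group-name> <counter-name>), which prints a single counter value for a given job. A minimal standalone sketch of the same lookup (the class name and argument handling are ours):

    import org.apache.hadoop.mapred.Counters;
    import org.apache.hadoop.mapred.JobClient;
    import org.apache.hadoop.mapred.JobConf;
    import org.apache.hadoop.mapred.JobID;
    import org.apache.hadoop.mapred.RunningJob;

    public class PrintOneCounter {
      // args: <job-id> <group-name> <counter-name>
      public static void main(String[] args) throws Exception {
        JobClient client = new JobClient(new JobConf());
        RunningJob job = client.getJob(JobID.forName(args[0]));
        if (job == null) {
          System.out.println("Could not find job " + args[0]);
          return;
        }
        Counters counters = job.getCounters();
        if (counters == null) {
          System.out.println("Counters not available for " + args[0]);
          return;
        }
        System.out.println(
            counters.getGroup(args[1]).getCounterForName(args[2]).getCounter());
      }
    }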

      if (!report.equals(lastReport)
          || System.currentTimeMillis() >= reportTime + maxReportInterval) {
        // find out CPU msecs; if the counter is unavailable, just skip
        // printing it
        if (ctrs != null) {
          Counter counterCpuMsec = ctrs.findCounter("org.apache.hadoop.mapred.Task$Counter",
              "CPU_MILLISECONDS");
          if (counterCpuMsec != null) {
            long newCpuMSec = counterCpuMsec.getValue();
            if (newCpuMSec > 0) {
              cpuMsec = newCpuMSec;
              report += ", Cumulative CPU "
                + (cpuMsec / 1000D) + " sec";
            }
          }
        }

        // write out serialized plan with counters to log file
        // LOG.info(queryPlan);
        String output = dateFormat.format(Calendar.getInstance().getTime()) + report;
        SessionState ss = SessionState.get();
        if (ss != null) {
          ss.getHiveHistory().setTaskCounters(SessionState.get().getQueryId(), getId(), ctrs);
          ss.getHiveHistory().setTaskProperty(SessionState.get().getQueryId(), getId(),
              Keys.TASK_HADOOP_PROGRESS, output);
          if (ss.getConf().getBoolVar(HiveConf.ConfVars.HIVE_LOG_INCREMENTAL_PLAN_PROGRESS)) {
            ss.getHiveHistory().progressTask(SessionState.get().getQueryId(), this.task);
            this.callBackObj.logPlanProgress(ss);
          }
        }
        console.printInfo(output);
        lastReport = report;
        reportTime = System.currentTimeMillis();
      }
    }

    if (cpuMsec > 0) {
      console.printInfo("MapReduce Total cumulative CPU time: "
          + Utilities.formatMsecToStr(cpuMsec));
    }

    boolean success;

    Counters ctrs = th.getCounters();
    if (fatal) {
      success = false;
    } else {
      // check for fatal error again in case it occurred after
      // the last check before the job is completed
      if (checkFatalErrors(ctrs, errMsg)) {
        console.printError("[Fatal Error] " + errMsg.toString());
        success = false;
      } else {
        SessionState ss = SessionState.get();
        if (ss != null) {
          ss.getHiveHistory().setTaskCounters(SessionState.get().getQueryId(), getId(), ctrs);
        }
        success = rj.isSuccessful();
      }
    }

    if (ctrs != null) {
      Counter counterCpuMsec = ctrs.findCounter("org.apache.hadoop.mapred.Task$Counter",
          "CPU_MILLISECONDS");
      if (counterCpuMsec != null) {
        long newCpuMSec = counterCpuMsec.getValue();
        if (newCpuMSec > cpuMsec) {
          cpuMsec = newCpuMSec;
        }
      }
    }

        
      LOG.info("send partitions to subprocess for map task");
      process.inputClient.partitions(job.getNumReduceTasks());

      // run map
      Counter inputRecordCounter =
        reporter.getCounter("org.apache.hadoop.mapred.Task$Counter",
                            "MAP_INPUT_RECORDS");
      TetherData data = new TetherData();
      while (recordReader.next(data, NullWritable.get())) {
        process.inputClient.input(data.buffer(), data.count());
        // the framework counts one input record per next() call; add the
        // rest of the batch
        inputRecordCounter.increment(data.count() - 1);
        if (process.outputService.isFinished()) {
          break;
        }
      }
      LOG.info("send complete to subprocess for map task");
      process.inputClient.complete();
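The Avro tether runner above obtains a writable Counter through Reporter.getCounter(String, String) and bumps it by hand because each tethered input() call carries a whole batch of records. For ordinary old-API tasks the more common route is Reporter.incrCounter with a user-defined enum; a minimal sketch (CountingMapper and its MyCounter enum are hypothetical, not from the snippet):

    import java.io.IOException;
    import org.apache.hadoop.io.LongWritable;
    import org.apache.hadoop.io.Text;
    import org.apache.hadoop.mapred.MapReduceBase;
    import org.apache.hadoop.mapred.Mapper;
    import org.apache.hadoop.mapred.OutputCollector;
    import org.apache.hadoop.mapred.Reporter;

    public class CountingMapper extends MapReduceBase
        implements Mapper<LongWritable, Text, Text, LongWritable> {

      // user-defined counters appear under their own group in job output
      public enum MyCounter { EMPTY_LINES }

      public void map(LongWritable key, Text value,
          OutputCollector<Text, LongWritable> out, Reporter reporter)
          throws IOException {
        if (value.toString().trim().isEmpty()) {
          // shorthand for reporter.getCounter(...).increment(1)
          reporter.incrCounter(MyCounter.EMPTY_LINES, 1);
        } else {
          out.collect(value, new LongWritable(1L));
        }
      }
    }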

            activeSpillCountRecs = counters.findCounter(
                    PigCounters.PROACTIVE_SPILL_COUNT_RECS).getCounter();

            Iterator<Counter> iter = multistoregroup.iterator();
            while (iter.hasNext()) {
                Counter cter = iter.next();
                multiStoreCounters.put(cter.getName(), cter.getValue());
            }    
           
            Iterator<Counter> iter2 = multiloadgroup.iterator();
            while (iter2.hasNext()) {
                Counter cter = iter2.next();
                multiInputCounters.put(cter.getName(), cter.getValue());
            }
           
        }             
    }

            Iterator<Counter> it = groupCounters.iterator();
            HashMap<Integer,Long> counterList = new HashMap<Integer, Long>();

            while (it.hasNext()) {
                try {
                    Counter c = it.next();
                    counterList.put(Integer.valueOf(c.getDisplayName()), c.getValue());
                } catch (Exception ex) {
                    // display name was not numeric; skip this counter
                    ex.printStackTrace();
                }
            }
            counterSize = counterList.size();
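The snippet above assumes every counter in the group has a numeric display name and skips the ones that do not. When you just need everything, both Counters and each Counters.Group are Iterable, so a generic dump needs no names up front (a sketch; the class name is ours):

    import org.apache.hadoop.mapred.Counters;

    public final class CounterDump {
      // print every counter in every group, one per line
      static void dump(Counters counters) {
        for (Counters.Group group : counters) {
          for (Counters.Counter counter : group) {
            System.out.println(group.getName() + "\t"
                + counter.getName() + "=" + counter.getValue());
          }
        }
      }
    }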
