Package org.apache.hadoop.mapred

Examples of org.apache.hadoop.mapred.Counters$Group
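Before the project snippets, here is a minimal self-contained sketch of the Counters / Counters.Group API that they all rely on. The class name, method name, and group name are illustrative only, not taken from the snippets:

    import java.io.IOException;

    import org.apache.hadoop.mapred.Counters;
    import org.apache.hadoop.mapred.RunningJob;

    public class CounterGroupExample {

        // Sums every counter in one named group of a job.
        public static long sumGroup(RunningJob job, String groupName) throws IOException {
            Counters counters = job.getCounters(); // may be null, hence the guard
            if (counters == null) {
                return 0L;
            }
            // getGroup returns an empty group for unknown names; the null checks
            // in some snippets below are defensive.
            Counters.Group group = counters.getGroup(groupName);
            long sum = 0L;
            for (Counters.Counter counter : group) { // Group iterates over its counters
                sum += counter.getValue();
            }
            return sum;
        }
    }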


     * @param operationID the unique operation identifier; after the counter
     * values are collected on the global counters (POCounter), they are passed
     * to PORank via the configuration file under this identifier
     */
    private void saveCounters(Job job, String operationID) {
        Counters counters;
        Group groupCounters;

        Long previousValue = 0L;
        Long previousSum = 0L;
        ArrayList<Pair<String,Long>> counterPairs;

        try {
            counters = HadoopShims.getCounters(job);

            String groupName = getGroupName(counters.getGroupNames());
            // If the counter group was not found, we need to find out why. The
            // only acceptable state is that the relation was empty.
            if (groupName == null) {
                Counter outputRecords =
                    counters.getGroup(MRPigStatsUtil.TASK_COUNTER_GROUP)
                    .getCounterForName(MRPigStatsUtil.MAP_OUTPUT_RECORDS);

                if (outputRecords.getCounter() == 0) {
                    globalCounters.put(operationID, new ArrayList<Pair<String, Long>>());
                    return;
                } else {
                    throw new RuntimeException("Did not find RANK counter group for operationID: " + operationID);
                }
            }
            groupCounters = counters.getGroup(groupName);

            Iterator<Counter> it = groupCounters.iterator();
            HashMap<Integer,Long> counterList = new HashMap<Integer, Long>();

            while (it.hasNext()) {
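                // --- hedged continuation sketch: the listing truncates the
                // method here and the real Pig source may differ; the task-index
                // parsing and the RANK_COUNTER_PREFIX constant are assumptions
                // used only for illustration ---
                Counter c = it.next();
                // assumption: each counter's display name encodes a task index
                counterList.put(Integer.valueOf(c.getDisplayName()), c.getValue());
            }

            counterPairs = new ArrayList<Pair<String, Long>>();
            // turn the per-task record counts into cumulative offsets so PORank
            // can compute global ranks
            for (int i = 0; i < counterList.size(); i++) {
                previousSum += previousValue;
                previousValue = counterList.get(i);
                counterPairs.add(new Pair<String, Long>(
                        RANK_COUNTER_PREFIX + operationID + "-" + i, previousSum));
            }
            globalCounters.put(operationID, counterPairs);
        } catch (Exception e) {
            throw new RuntimeException("Unable to read counters for operationID: " + operationID, e);
        }
    }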


       
        try {
            String[] args = { PIG_FILE };
            PigStats stats = PigRunner.run(args, new TestNotificationListener());
           
            Counters counter = ((MRJobStats) stats.getJobGraph().getSinks().get(0)).getHadoopCounters();
            assertEquals(5, counter.getGroup(MRPigStatsUtil.TASK_COUNTER_GROUP).getCounterForName(
                    MRPigStatsUtil.MAP_INPUT_RECORDS).getValue());
            assertEquals(3, counter.getGroup(MRPigStatsUtil.TASK_COUNTER_GROUP).getCounterForName(
                    MRPigStatsUtil.MAP_OUTPUT_RECORDS).getValue());
            assertEquals(2, counter.getGroup(MRPigStatsUtil.TASK_COUNTER_GROUP).getCounterForName(
                    MRPigStatsUtil.REDUCE_INPUT_RECORDS).getValue());
            assertEquals(0, counter.getGroup(MRPigStatsUtil.TASK_COUNTER_GROUP).getCounterForName(
                    MRPigStatsUtil.REDUCE_OUTPUT_RECORDS).getValue());
            assertEquals(20, counter.getGroup(MRPigStatsUtil.FS_COUNTER_GROUP).getCounterForName(
                    MRPigStatsUtil.HDFS_BYTES_WRITTEN).getValue());
           
            // Skip for Hadoop 0.20.203+; see PIG-2446
            if (Util.isHadoop203plus())
                return;
           
            assertEquals(30, counter.getGroup(MRPigStatsUtil.FS_COUNTER_GROUP).getCounterForName(
                    MRPigStatsUtil.HDFS_BYTES_READ).getValue());
        } finally {
            new File(PIG_FILE).delete();
            Util.deleteFile(cluster, OUTPUT_FILE);
            Util.deleteFile(cluster, OUTPUT_FILE_2);
        }
    }
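The repeated group/name lookups in the assertions above could go through one small helper. A sketch with our own method name, not part of the Pig test:

    // Sketch: fetch one counter value from a job's Hadoop counters.
    private static long counterValue(Counters counters, String group, String name) {
        return counters.getGroup(group).getCounterForName(name).getValue();
    }

    // e.g. assertEquals(5, counterValue(counter, MRPigStatsUtil.TASK_COUNTER_GROUP,
    //         MRPigStatsUtil.MAP_INPUT_RECORDS));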

      // let the job retry several times, which eventually leads to failure.
      if (fatal) {
        continue; // wait until rj.isComplete
      }

      Counters ctrs = th.getCounters();

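      // note: the condition below intentionally assigns to fatal, so subsequent
      // iterations take the continue branch and wait for rj.isComplete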
      if (fatal = checkFatalErrors(ctrs, errMsg)) {
        console.printError("[Fatal Error] " + errMsg.toString() + ". Killing the job.");
        rj.killJob();
        continue;
      }
      errMsg.setLength(0);

      updateCounters(ctrs, rj);

      // Prepare data for Client Stat Publishers (if any present) and execute them
      if (clientStatPublishers.size() > 0 && ctrs != null) {
        Map<String, Double> extractedCounters = extractAllCounterValues(ctrs);
        for (ClientStatsPublisher clientStatPublisher : clientStatPublishers) {
          try {
            clientStatPublisher.run(extractedCounters, rj.getID().toString());
          } catch (RuntimeException runtimeException) {
            LOG.error("Exception " + runtimeException.getClass().getCanonicalName()
                + " thrown when running clientStatsPublishers. The stack trace is: ",
                runtimeException);
          }
        }
      }

      String report = " " + getId() + " map = " + mapProgress + "%,  reduce = " + reduceProgress
          + "%";


      if (!report.equals(lastReport)
          || System.currentTimeMillis() >= reportTime + maxReportInterval) {
        // find the cumulative CPU msecs; if the counter is unavailable, just
        // skip printing it
        if (ctrs != null) {
          Counter counterCpuMsec = ctrs.findCounter("org.apache.hadoop.mapred.Task$Counter",
              "CPU_MILLISECONDS");
          if (counterCpuMsec != null) {
            long newCpuMSec = counterCpuMsec.getValue();
            if (newCpuMSec > 0) {
              cpuMsec = newCpuMSec;
              report += ", Cumulative CPU "
                + (cpuMsec / 1000D) + " sec";
            }
          }
        }

        // write out serialized plan with counters to log file
        // LOG.info(queryPlan);
        String output = dateFormat.format(Calendar.getInstance().getTime()) + report;
        SessionState ss = SessionState.get();
        if (ss != null) {
          ss.getHiveHistory().setTaskCounters(SessionState.get().getQueryId(), getId(), ctrs);
          ss.getHiveHistory().setTaskProperty(SessionState.get().getQueryId(), getId(),
              Keys.TASK_HADOOP_PROGRESS, output);
          ss.getHiveHistory().progressTask(SessionState.get().getQueryId(), this.task);
          this.callBackObj.logPlanProgress(ss);
        }
        console.printInfo(output);
        lastReport = report;
        reportTime = System.currentTimeMillis();
      }
    }

    if (cpuMsec > 0) {
      console.printInfo("MapReduce Total cumulative CPU time: "
          + Utilities.formatMsecToStr(cpuMsec));
    }

    boolean success;

    Counters ctrs = th.getCounters();
    if (fatal) {
      success = false;
    } else {
      // check for fatal error again in case it occurred after
      // the last check before the job is completed
      if (checkFatalErrors(ctrs, errMsg)) {
        console.printError("[Fatal Error] " + errMsg.toString());
        success = false;
      } else {
        SessionState ss = SessionState.get();
        if (ss != null) {
          ss.getHiveHistory().setTaskCounters(SessionState.get().getQueryId(), getId(), ctrs);
        }
        success = rj.isSuccessful();
      }
    }

    if (ctrs != null) {
      Counter counterCpuMsec = ctrs.findCounter("org.apache.hadoop.mapred.Task$Counter",
          "CPU_MILLISECONDS");
      if (counterCpuMsec != null) {
        long newCpuMSec = counterCpuMsec.getValue();
        if (newCpuMSec > cpuMsec) {
          cpuMsec = newCpuMSec;
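The CPU-time lookup above is the same one repeated after the loop; a null-safe helper would factor it out. A sketch, not the actual Hive code:

    // Sketch: cumulative CPU milliseconds from the task counters, or -1 when unavailable.
    private static long cpuMillis(Counters ctrs) {
        if (ctrs == null) {
            return -1L;
        }
        Counters.Counter c = ctrs.findCounter("org.apache.hadoop.mapred.Task$Counter",
            "CPU_MILLISECONDS");
        return (c == null) ? -1L : c.getValue();
    }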

    }

    public static boolean isMainSuccessful(RunningJob runningJob) throws IOException {
        boolean succeeded = runningJob.isSuccessful();
        if (succeeded) {
            Counters counters = runningJob.getCounters();
            if (counters != null) {
                Counters.Group group = counters.getGroup(COUNTER_GROUP);
                if (group != null) {
                    succeeded = group.getCounter(COUNTER_LAUNCHER_ERROR) == 0;
                }
            }
        }

        return succeeded;
    }

    public static boolean hasOutputData(RunningJob runningJob) throws IOException {
        boolean output = false;
        Counters counters = runningJob.getCounters();
        if (counters != null) {
            Counters.Group group = counters.getGroup(COUNTER_GROUP);
            if (group != null) {
                output = group.getCounter(COUNTER_OUTPUT_DATA) == 1;
            }
        }
        return output;
    }

     * @return whether the running job has stats data or not
     * @throws IOException
     */
    public static boolean hasStatsData(RunningJob runningJob) throws IOException {
        boolean output = false;
        Counters counters = runningJob.getCounters();
        if (counters != null) {
            Counters.Group group = counters.getGroup(COUNTER_GROUP);
            if (group != null) {
                output = group.getCounter(COUNTER_STATS_DATA) == 1;
            }
        }
        return output;
    }

     * @return whether the running job performed a Hadoop ID swap
     * @throws IOException
     */
    public static boolean hasIdSwap(RunningJob runningJob) throws IOException {
        boolean swap = false;
        Counters counters = runningJob.getCounters();
        if (counters != null) {
            Counters.Group group = counters.getGroup(COUNTER_GROUP);
            if (group != null) {
                swap = group.getCounter(COUNTER_DO_ID_SWAP) == 1;
            }
        }
        return swap;
    }
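hasOutputData, hasStatsData, and hasIdSwap all test whether one flag counter in COUNTER_GROUP equals 1, so the pattern factors into a single helper. A sketch with our own method name, not the actual Oozie code:

    // Sketch: true when the named flag counter in COUNTER_GROUP equals 1.
    private static boolean isFlagSet(RunningJob runningJob, String counterName) throws IOException {
        Counters counters = runningJob.getCounters();
        if (counters == null) {
            return false;
        }
        Counters.Group group = counters.getGroup(COUNTER_GROUP);
        return group != null && group.getCounter(counterName) == 1;
    }

    // e.g. hasOutputData(runningJob) becomes isFlagSet(runningJob, COUNTER_OUTPUT_DATA)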

            throws IOException, HadoopAccessorException {
        boolean swap = false;

        XLog log = XLog.getLog("org.apache.oozie.action.hadoop.LauncherMapper");

        Counters counters = runningJob.getCounters();
        if (counters != null) {
            Counters.Group counterGroup = counters.getGroup(COUNTER_GROUP);
            if (counterGroup != null) {
                swap = counterGroup.getCounter(COUNTER_DO_ID_SWAP) == 1;
            }
        }
        // additional check for swapped hadoop ID

                Element actionXml = XmlUtils.parseXml(action.getConf());
                JobConf jobConf = createBaseHadoopConf(context, actionXml);
                jobClient = createJobClient(context, jobConf);

                // Cumulative counters for all Sqoop mapreduce jobs
                Counters counters = null;

                String externalIds = action.getExternalChildIDs();
                String[] jobIds = externalIds.split(",");

                for (String jobId : jobIds) {
                    RunningJob runningJob = jobClient.getJob(JobID.forName(jobId));
                    if (runningJob == null) {
                      throw new ActionExecutorException(ActionExecutorException.ErrorType.FAILED, "SQOOP001",
                        "Unknown hadoop job [{0}] associated with action [{1}].  Failing this action!", action
                        .getExternalId(), action.getId());
                    }

                    Counters taskCounters = runningJob.getCounters();
                    if (taskCounters != null) {
                        if (counters == null) {
                            counters = taskCounters;
                        } else {
                            counters.incrAllCounters(taskCounters);
                        }
                    }
                }
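Counters.incrAllCounters merges every group of its argument into the receiver, which is why the first child job's counters can seed the accumulator. A condensed sketch of the same merge (the missing-job handling is simplified; the snippet above fails the whole action instead):

    // Sketch: fold the counters of all child MapReduce jobs into one total.
    private static Counters sumChildCounters(JobClient jobClient, String[] jobIds)
            throws IOException {
        Counters total = null;
        for (String jobId : jobIds) {
            RunningJob runningJob = jobClient.getJob(JobID.forName(jobId));
            Counters taskCounters = (runningJob == null) ? null : runningJob.getCounters();
            if (taskCounters == null) {
                continue;                             // simplified handling
            }
            if (total == null) {
                total = taskCounters;                 // first job seeds the total
            } else {
                total.incrAllCounters(taskCounters);  // adds counters group by group
            }
        }
        return total;
    }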

                    throw new ActionExecutorException(ActionExecutorException.ErrorType.FAILED, "MR002",
                                                      "Unknown hadoop job [{0}] associated with action [{1}].  Failing this action!", action
                            .getExternalId(), action.getId());
                }

                Counters counters = runningJob.getCounters();
                if (counters != null) {
                    ActionStats stats = new MRStats(counters);
                    String statsJsonString = stats.toJSON();
                    context.setVar(HADOOP_COUNTERS, statsJsonString);
                }
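MRStats serializes the counters to JSON for the workflow context. A minimal sketch of flattening Counters into nested maps, our own illustration rather than the MRStats implementation (requires java.util.HashMap and java.util.Map):

    // Sketch: flatten Counters into group name -> (counter name -> value).
    private static Map<String, Map<String, Long>> flatten(Counters counters) {
        Map<String, Map<String, Long>> flat = new HashMap<String, Map<String, Long>>();
        for (String groupName : counters.getGroupNames()) {
            Map<String, Long> values = new HashMap<String, Long>();
            for (Counters.Counter counter : counters.getGroup(groupName)) {
                values.put(counter.getName(), counter.getValue());
            }
            flat.put(groupName, values);
        }
        return flat;
    }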
