Package org.apache.hadoop.mapred

Examples of org.apache.hadoop.mapred.Counters$CountersExceededException
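None of the excerpts below catches CountersExceededException directly; it is the unchecked exception the old mapred Counters class raises when a job tries to create more distinct counters than the configured per-job limit (120 by default in Hadoop 1.x). A minimal sketch of how it can surface and be handled; the group name, counter names, and loop bound are made up for illustration:

  Counters counters = new Counters();
  try {
    for (int i = 0; i < 1000; i++) {
      // Every distinct counter name counts against the per-job counter limit.
      counters.incrCounter("DEMO_GROUP", "COUNTER_" + i, 1);
    }
  } catch (Counters.CountersExceededException e) {
    // Raised once the limit is exceeded; the caller can log and carry on
    // without the extra counters.
    System.err.println("Counter limit exceeded: " + e.getMessage());
  }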


      JobID mapRedJobID = job.getAssignedJobID();
      RunningJob runningJob = null;
      try {
        runningJob = jobClient.getJob(mapRedJobID);
        if (runningJob != null) {
          // Fold this job's PigWarning counters into the running aggregate.
          Counters counters = runningJob.getCounters();
          for (Enum e : PigWarning.values()) {
            Long currentCount = aggMap.get(e);
            currentCount = (currentCount == null ? 0 : currentCount);
            currentCount += counters.getCounter(e);
            aggMap.put(e, currentCount);
          }
        }
      } catch (IOException ioe) {
        String msg = "Unable to retrieve job to compute warning aggregation.";
View Full Code Here


    while (ite.hasNext()) {
      JobID simuJobId = ite.next();
     
      JobHistory.JobInfo jhInfo = getSimulatedJobHistory(simuJobId);
      Assert.assertNotNull("Job history not found.", jhInfo);
      Counters counters =
          Counters.fromEscapedCompactString(jhInfo.getValues()
              .get(JobHistory.Keys.COUNTERS));
      JobConf simuJobConf = getSimulatedJobConf(simuJobId, destFolder);
      int cnt = 1;
      do {
View Full Code Here

        }
      }
      LOG.info("Total Heap Usage of Reduces for original job: "
              + origJobReducesTHU);
     
      Counters mapCounters =
          Counters.fromEscapedCompactString(jhInfo.getValues()
                  .get(JobHistory.Keys.MAP_COUNTERS));

      Counters reduceCounters =
          Counters.fromEscapedCompactString(jhInfo.getValues()
                  .get(JobHistory.Keys.REDUCE_COUNTERS));

      simuJobMapsTHU =
          getCounterValue(mapCounters,
View Full Code Here

   * @throws Exception - if an error occurs.
   */
  private Map<String,Long> getSimulatedJobCPUMetrics(
                           JobHistory.JobInfo jhInfo) throws Exception {
    Map<String, Long> resourceMetrics = new HashMap<String, Long>();
    Counters mapCounters = Counters.fromEscapedCompactString(
        jhInfo.getValues().get(JobHistory.Keys.MAP_COUNTERS));
    long mapCPUUsage =
        getCounterValue(mapCounters,
                        Task.Counter.CPU_MILLISECONDS.toString());
    resourceMetrics.put("MAP", mapCPUUsage);

    Counters reduceCounters = Counters.fromEscapedCompactString(
        jhInfo.getValues().get(JobHistory.Keys.REDUCE_COUNTERS));
    long reduceCPUUsage =
        getCounterValue(reduceCounters,
                        Task.Counter.CPU_MILLISECONDS.toString());
    resourceMetrics.put("REDUCE", reduceCPUUsage);
View Full Code Here
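The excerpts above recover Counters from a job-history record, where they are stored in an escaped compact string form. A minimal round-trip sketch of that serialization; the group and counter names are made up for illustration:

    // Serialize a Counters object to the compact form stored in job history ...
    Counters original = new Counters();
    original.incrCounter("DEMO_GROUP", "DEMO_COUNTER", 42);
    String compact = original.makeEscapedCompactString();

    // ... and parse it back; fromEscapedCompactString throws
    // java.text.ParseException if the string is malformed.
    Counters restored = Counters.fromEscapedCompactString(compact);
    long value = restored.findCounter("DEMO_GROUP", "DEMO_COUNTER").getCounter();   // 42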

    long startTime = System.currentTimeMillis();
    RunningJob job = JobClient.runJob(conf);
    sLogger.info("Job Finished in " + (System.currentTimeMillis() - startTime) / 1000.0
        + " seconds");

    Counters counters = job.getCounters();

    // Look up REDUCE_INPUT_GROUPS in the legacy Task$Counter group via the
    // deprecated findCounter(group, id, name) overload.
    long totalNumTerms = counters.findCounter("org.apache.hadoop.mapred.Task$Counter", 6,
        "REDUCE_INPUT_GROUPS").getCounter();

    sLogger.info("total number of terms in global dictionary = " + totalNumTerms);

    // now build the dictionary
View Full Code Here

    sLogger.info("Number of results: " + (numResults==-1 ? "all" : numResults));

    long startTime = System.currentTimeMillis();
    RunningJob j = JobClient.runJob(job);
    System.out.println("Job finished in "+(System.currentTimeMillis()-startTime)+" milliseconds");
    Counters counters = j.getCounters();
    long processed = (long) counters.findCounter(mapoutput.PROCESSEDPAIRS).getCounter();
    long prefixsum = (long) counters.findCounter(mapoutput.PrefixSum).getCounter();
    System.out.println("Avg prefix length = "+(prefixsum/(float)processed));
   
    return 0;
  }
View Full Code Here

    long startTime = System.currentTimeMillis();

    RunningJob rj = JobClient.runJob(conf);
    sLogger.info("Job Finished in " + (System.currentTimeMillis() - startTime) / 1000.0
        + " seconds");
    Counters counters = rj.getCounters();

    long numOfDocs = counters.findCounter(Docs.Total).getCounter();

    return (int) numOfDocs;
  }
View Full Code Here
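The two driver excerpts above read enum-keyed counters after the job completes; the values themselves are produced inside the tasks, typically through the Reporter. A hypothetical mapper modeled on the Docs.Total counter used above; the key/value types and the pass-through output are assumptions, not taken from the original source:

    public static class CountingMapper extends MapReduceBase
        implements Mapper<LongWritable, Text, LongWritable, Text> {

      public void map(LongWritable key, Text value,
                      OutputCollector<LongWritable, Text> output, Reporter reporter)
          throws IOException {
        reporter.incrCounter(Docs.Total, 1);   // one document processed
        output.collect(key, value);            // pass the record through unchanged
      }
    }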

        JobID mapRedJobID = job.getAssignedJobID();
        RunningJob runningJob = null;
        try {
            runningJob = jobClient.getJob(mapRedJobID);
            if(runningJob != null) {
                Counters counters = runningJob.getCounters();
                if (counters == null)
                {
                    long nullCounterCount = aggMap.get(PigWarning.NULL_COUNTER_COUNT) == null
                            ? 0 : aggMap.get(PigWarning.NULL_COUNTER_COUNT);
                    nullCounterCount++;
                    aggMap.put(PigWarning.NULL_COUNTER_COUNT, nullCounterCount);
                }
                for (Enum e : PigWarning.values()) {
                    if (e != PigWarning.NULL_COUNTER_COUNT) {
                        Long currentCount = aggMap.get(e);
                        currentCount = (currentCount == null ? 0 : currentCount);
                        // If counters is null, the number of warning
                        // aggregations reported to the user may be incomplete.
                        // Counters should never be null; that is a Hadoop bug,
                        // and once it is fixed this null-handling code will
                        // never be hit. See PIG-943.
                        if (counters != null)
                            currentCount += counters.getCounter(e);
                        aggMap.put(e, currentCount);
                    }
                }
            }
        } catch (IOException ioe) {
View Full Code Here

 
  /*
   * Parse and add the job counters
   */
  private void parseAndAddJobCounters(Hashtable<Enum, String> job, String counters) throws ParseException {
    Counters cnt = Counters.fromEscapedCompactString(counters);
    for (java.util.Iterator<Counters.Group> grps = cnt.iterator(); grps.hasNext(); ) {
      Counters.Group grp = grps.next();
      //String groupname = "<" + grp.getName() + ">::<" + grp.getDisplayName() + ">";
      for (java.util.Iterator<Counters.Counter> mycounters = grp.iterator(); mycounters.hasNext(); ) {
        Counters.Counter counter = mycounters.next();
        //String countername = "<"+counter.getName()+">::<"+counter.getDisplayName()+">::<"+counter.getValue()+">";
View Full Code Here

 
  /*
   * Parse and add the Map task counters
   */
  private void parseAndAddMapTaskCounters(MapTaskStatistics mapTask, String counters) throws ParseException {
    Counters cnt = Counters.fromEscapedCompactString(counters);
    for (java.util.Iterator<Counters.Group> grps = cnt.iterator(); grps.hasNext(); ) {
      Counters.Group grp = grps.next();
      //String groupname = "<" + grp.getName() + ">::<" + grp.getDisplayName() + ">";
      for (java.util.Iterator<Counters.Counter> mycounters = grp.iterator(); mycounters.hasNext(); ) {
        Counters.Counter counter = mycounters.next();
        //String countername = "<"+counter.getName()+">::<"+counter.getDisplayName()+">::<"+counter.getValue()+">";
View Full Code Here
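The two parse-and-add methods above walk the parsed counters with explicit iterators; since Counters and Counters.Group both implement Iterable in this API, the same traversal can be written with for-each loops (a sketch over the same cnt variable):

    for (Counters.Group group : cnt) {
      for (Counters.Counter counter : group) {
        System.out.println(group.getDisplayName() + "::" + counter.getDisplayName()
            + " = " + counter.getCounter());
      }
    }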
