Examples of org.apache.hadoop.mapred.Counters$Counter

The snippets below, collected from Hadoop tests and related projects such as Pig, show common ways to read, iterate, parse, and aggregate job counters through the old mapred API.


      } else {
        rJob = Submitter.runJob(job);
      }
      assertTrue("pipes job failed", rJob.isSuccessful());

      // Fetch the finished job's counters and walk the custom WORDCOUNT group.
      Counters counters = rJob.getCounters();
      Counters.Group wordCountCounters = counters.getGroup("WORDCOUNT");
      int numCounters = 0;
      for (Counter c : wordCountCounters) {
        System.out.println(c);
        ++numCounters;
      }
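For reference, in a plain Java old-API job the producer side would increment such a group through Reporter. A minimal sketch; the WordCountMapper class and the INPUT_WORDS counter name are illustrative assumptions, not part of the pipes test above:

import java.io.IOException;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapred.MapReduceBase;
import org.apache.hadoop.mapred.Mapper;
import org.apache.hadoop.mapred.OutputCollector;
import org.apache.hadoop.mapred.Reporter;

public class WordCountMapper extends MapReduceBase
    implements Mapper<LongWritable, Text, Text, IntWritable> {
  private static final IntWritable ONE = new IntWritable(1);
  private final Text word = new Text();

  public void map(LongWritable key, Text value,
                  OutputCollector<Text, IntWritable> output, Reporter reporter)
      throws IOException {
    for (String token : value.toString().split("\\s+")) {
      word.set(token);
      output.collect(word, ONE);
      // Bump a counter in the custom "WORDCOUNT" group by group/counter name.
      reporter.incrCounter("WORDCOUNT", "INPUT_WORDS", 1);
    }
  }
}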


    assertTrue(runningJob.isSuccessful());

    if (validateCount) {
      // Validate the skip-mode counters against the known bad-record lists.
      String counterGrp = "org.apache.hadoop.mapred.Task$Counter";
      Counters counters = runningJob.getCounters();
      assertEquals(counters.findCounter(counterGrp, "MAP_SKIPPED_RECORDS").
          getCounter(), MAPPER_BAD_RECORDS.size());

      int mapRecs = INPUTSIZE - MAPPER_BAD_RECORDS.size();
      assertEquals(counters.findCounter(counterGrp, "MAP_INPUT_RECORDS").
          getCounter(), mapRecs);
      assertEquals(counters.findCounter(counterGrp, "MAP_OUTPUT_RECORDS").
          getCounter(), mapRecs);

      int redRecs = mapRecs - REDUCER_BAD_RECORDS.size();
      assertEquals(counters.findCounter(counterGrp, "REDUCE_SKIPPED_RECORDS").
          getCounter(), REDUCER_BAD_RECORDS.size());
      assertEquals(counters.findCounter(counterGrp, "REDUCE_SKIPPED_GROUPS").
          getCounter(), REDUCER_BAD_RECORDS.size());
      assertEquals(counters.findCounter(counterGrp, "REDUCE_INPUT_GROUPS").
          getCounter(), redRecs);
      assertEquals(counters.findCounter(counterGrp, "REDUCE_INPUT_RECORDS").
          getCounter(), redRecs);
      assertEquals(counters.findCounter(counterGrp, "REDUCE_OUTPUT_RECORDS").
          getCounter(), redRecs);
    }

    List<String> badRecs = new ArrayList<String>();
    badRecs.addAll(MAPPER_BAD_RECORDS);
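The skip counters asserted above are only populated when skip mode is enabled on the job. A minimal configuration sketch, assuming the old mapred API; the threshold values here are illustrative:

JobConf conf = new JobConf();
// Begin skipping after two failed attempts of the same task.
SkipBadRecords.setAttemptsToStartSkipping(conf, 2);
// Tolerate isolated bad records in mappers and bad groups in reducers.
SkipBadRecords.setMapperMaxSkipRecords(conf, 1);
SkipBadRecords.setReducerMaxSkipGroups(conf, 1);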

 
  /*
   * Parse and add the job counters
   */
  private void parseAndAddJobCounters(Hashtable<Enum, String> job, String counters) throws ParseException {
    // Rebuild the Counters object from its escaped compact-string form.
    Counters cnt = Counters.fromEscapedCompactString(counters);
    for (Counters.Group grp : cnt) {
      // Each group exposes getName() and getDisplayName().
      for (Counters.Counter counter : grp) {
        // Each counter exposes getName(), getDisplayName() and getValue().

 
  /*
   * Parse and add the Map task counters
   */
  private void parseAndAddMapTaskCounters(MapTaskStatistics mapTask, String counters) throws ParseException {
    // Rebuild the Counters object from its escaped compact-string form.
    Counters cnt = Counters.fromEscapedCompactString(counters);
    for (Counters.Group grp : cnt) {
      // Each group exposes getName() and getDisplayName().
      for (Counters.Counter counter : grp) {
        // Each counter exposes getName(), getDisplayName() and getValue().

 
  /*
   * Parse and add the reduce task counters
   */
  private void parseAndAddReduceTaskCounters(ReduceTaskStatistics reduceTask, String counters) throws ParseException {
    // Rebuild the Counters object from its escaped compact-string form.
    Counters cnt = Counters.fromEscapedCompactString(counters);
    for (Counters.Group grp : cnt) {
      // Each group exposes getName() and getDisplayName().
      for (Counters.Counter counter : grp) {
        // Each counter exposes getName(), getDisplayName() and getValue().
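All three helpers above hinge on the escaped compact-string round trip. A self-contained sketch of that round trip; the MyGroup/MyCounter names are made up for illustration:

import java.text.ParseException;
import org.apache.hadoop.mapred.Counters;

public class CompactStringRoundTrip {
  public static void main(String[] args) throws ParseException {
    Counters original = new Counters();
    original.incrCounter("MyGroup", "MyCounter", 42);

    // Serialize to the escaped compact string, then parse it back.
    String compact = original.makeEscapedCompactString();
    Counters restored = Counters.fromEscapedCompactString(compact);

    long value = restored.getGroup("MyGroup")
        .getCounterForName("MyCounter").getValue();
    System.out.println(value); // prints 42
  }
}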

      long[] specOutputRecords = new long[runTasks.length];
      long[] specOutputBytes = new long[runTasks.length];

      for (int i = 0; i < runTasks.length; ++i) {
        final TaskInfo specInfo;
        final Counters counters = runTasks[i].getCounters();
        switch (type) {
          case MAP:
            // Map input bytes exclude the serialized split metadata.
            runInputBytes[i] = counters.findCounter("FileSystemCounters",
                "HDFS_BYTES_READ").getValue() -
                counters.findCounter(SPLIT_RAW_BYTES).getValue();
            runInputRecords[i] =
              (int)counters.findCounter(MAP_INPUT_RECORDS).getValue();
            runOutputBytes[i] =
              counters.findCounter(MAP_OUTPUT_BYTES).getValue();
            runOutputRecords[i] =
              (int)counters.findCounter(MAP_OUTPUT_RECORDS).getValue();

            specInfo = spec.getTaskInfo(TaskType.MAP, i);
            specInputRecords[i] = specInfo.getInputRecords();
            specInputBytes[i] = specInfo.getInputBytes();
            specOutputRecords[i] = specInfo.getOutputRecords();
            specOutputBytes[i] = specInfo.getOutputBytes();
            System.out.printf(type + " SPEC: %9d -> %9d :: %5d -> %5d\n",
                specInputBytes[i], specOutputBytes[i],
                specInputRecords[i], specOutputRecords[i]);
            System.out.printf(type + " RUN:  %9d -> %9d :: %5d -> %5d\n",
                runInputBytes[i], runOutputBytes[i],
                runInputRecords[i], runOutputRecords[i]);
            break;
          case REDUCE:
            runInputBytes[i] = 0;
            runInputRecords[i] =
              (int)counters.findCounter(REDUCE_INPUT_RECORDS).getValue();
            runOutputBytes[i] =
              counters.findCounter("FileSystemCounters",
                  "HDFS_BYTES_WRITTEN").getValue();
            runOutputRecords[i] =
              (int)counters.findCounter(REDUCE_OUTPUT_RECORDS).getValue();

            specInfo = spec.getTaskInfo(TaskType.REDUCE, i);
            // There is no reliable counter for reduce input bytes. The
            // variable-length encoding of intermediate records and other noise
            // make this quantity difficult to estimate. The shuffle and spec
            // input bytes are included in debug output for reference, but are
            // not checked.
            specInputBytes[i] = 0;
            specInputRecords[i] = specInfo.getInputRecords();
            specOutputRecords[i] = specInfo.getOutputRecords();
            specOutputBytes[i] = specInfo.getOutputBytes();
            System.out.printf(type + " SPEC: (%9d) -> %9d :: %5d -> %5d\n",
                specInfo.getInputBytes(), specOutputBytes[i],
                specInputRecords[i], specOutputRecords[i]);
            System.out.printf(type + " RUN:  (%9d) -> %9d :: %5d -> %5d\n",
                counters.findCounter(REDUCE_SHUFFLE_BYTES).getValue(),
                runOutputBytes[i], runInputRecords[i], runOutputRecords[i]);
            break;
          default:
            specInfo = null;
            fail("Unexpected type: " + type);

    /*
     * Parse and add the job counters
     */
    @SuppressWarnings("deprecation")
    private static void parseAndAddJobCounters(Map<String, String> job, String counters) {
        try {
            Counters counterGroups = Counters.fromEscapedCompactString(counters);
            for (Group otherGroup : counterGroups) {
                Group group = counterGroups.getGroup(otherGroup.getName());
                for (Counter otherCounter : otherGroup) {
                    Counter counter = group.getCounterForName(otherCounter.getName());
                    job.put(otherCounter.getName(), String.valueOf(counter.getValue()));
                }
            }
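Since iterating a group already yields the live Counter objects, the per-name re-lookup above is not strictly needed. A simpler sketch of the same flattening (flattenCounters is an illustrative name):

private static void flattenCounters(Map<String, String> job, Counters counterGroups) {
    for (Counters.Group group : counterGroups) {
        for (Counters.Counter counter : group) {
            // Note: the map is keyed by counter name only, so identically
            // named counters in different groups overwrite each other.
            job.put(counter.getName(), String.valueOf(counter.getValue()));
        }
    }
}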

                    case FINISH_TIME:
                        endTime = Long.parseLong(val);
                        break;
                    case COUNTERS: {
                        try {
                            Counters counters = Counters.fromEscapedCompactString(val);
                            long rows = counters.getGroup(TASK_COUNTER_GROUP)
                                    .getCounterForName(MAP_INPUT_RECORDS).getCounter();
                            if (rows < minMapRows) minMapRows = rows;
                            if (rows > maxMapRows) maxMapRows = rows;
                        } catch (ParseException e) {
                            LOG.warn("Failed to parse job counters", e);
                        }
                    }
                    break;
                    default:
                        LOG.warn("JobHistory.Keys." + key
                                + " : NOT INCLUDED IN PERFORMANCE ADVISOR MAP COUNTERS");
                        break;
                    }
                }
                duration = endTime - startTime;
                if (minMapTime > duration) minMapTime = duration;
                if (maxMapTime < duration) maxMapTime = duration;
                totalMapTime += duration;
            } else if (task.get(Keys.TASK_TYPE).equals("REDUCE")) {
                Map<JobHistory.Keys, String> reduceTask = task.getValues();
                Map<JobHistory.Keys, String> successTaskAttemptMap = getLastSuccessfulTaskAttempt(task);
                // NOTE: tasks without a successful attempt are skipped, so
                // fewer tasks may be collected in the task-list array.
                if (successTaskAttemptMap != null) {
                    reduceTask.putAll(successTaskAttemptMap);
                } else {
                    LOG.warn("Task:<" + task.get(Keys.TASKID) + "> is not successful - SKIPPING");
                }
                long duration = 0;
                long startTime = 0;
                long endTime = 0;
                int size = reduceTask.size();
                numberReduces++;

                Iterator<Map.Entry<JobHistory.Keys, String>> kv = reduceTask.entrySet().iterator();
                for (int j = 0; j < size; j++) {
                    Map.Entry<JobHistory.Keys, String> rtc = kv.next();
                    JobHistory.Keys key = rtc.getKey();
                    String val = rtc.getValue();
                    switch (key) {
                    case START_TIME:
                        startTime = Long.parseLong(val);
                        break;
                    case FINISH_TIME:
                        endTime = Long.parseLong(val);
                        break;
                    case COUNTERS: {
                        try {
                            Counters counters = Counters.fromEscapedCompactString(val);
                            long rows = counters.getGroup(TASK_COUNTER_GROUP)
                                    .getCounterForName(REDUCE_INPUT_RECORDS).getCounter();
                            if (rows < minReduceRows) minReduceRows = rows;
                            if (rows > maxReduceRows) maxReduceRows = rows;
                        } catch (ParseException e) {
                            LOG.warn("Failed to parse job counters", e);

        return fs.getDefaultBlockSize(path);
    }

    public static Counters getCounters(Job job) throws IOException {
        try {
            // Wrap the new-API (mapreduce) counters in an old-API Counters object.
            return new Counters(job.getJob().getCounters());
        } catch (Exception ir) {
            throw new IOException(ir);
        }
    }
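A hedged usage sketch of this shim, assuming a Pig Job handle whose underlying Hadoop job has completed; pigJob and the counter choice are illustrative:

Counters counters = HadoopShims.getCounters(pigJob);
if (counters != null) {
    long outRecords = counters.findCounter(
        "org.apache.hadoop.mapred.Task$Counter", "MAP_OUTPUT_RECORDS").getCounter();
    System.out.println("map output records: " + outRecords);
}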

    }

    @SuppressWarnings("deprecation")
    void computeWarningAggregate(Job job, Map<Enum, Long> aggMap) {
        try {
            Counters counters = HadoopShims.getCounters(job);
            if (counters == null) {
                long nullCounterCount =
                        (aggMap.get(PigWarning.NULL_COUNTER_COUNT) == null)
                          ? 0
                          : aggMap.get(PigWarning.NULL_COUNTER_COUNT);
                nullCounterCount++;
                aggMap.put(PigWarning.NULL_COUNTER_COUNT, nullCounterCount);
            }
            for (Enum e : PigWarning.values()) {
                if (e != PigWarning.NULL_COUNTER_COUNT) {
                    Long currentCount = aggMap.get(e);
                    currentCount = (currentCount == null ? 0 : currentCount);
                    // If counters is null, the warning aggregation reported to
                    // the user may be incomplete. Counters should never be null;
                    // that is a Hadoop bug, and once it is fixed this
                    // null-handling path should never be hit. See PIG-943.
                    if (counters != null)
                        currentCount += counters.getCounter(e);
                    aggMap.put(e, currentCount);
                }
            }
        } catch (Exception e) {
            String msg = "Unable to retrieve job to compute warning aggregation.";
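A hedged usage sketch of the aggregator above; completedJobs is an assumed collection of finished Pig Job handles:

// Accumulate warning counts across several finished jobs into one map.
Map<Enum, Long> warningAgg = new HashMap<Enum, Long>();
for (Job job : completedJobs) {
    computeWarningAggregate(job, warningAgg);
}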
