Package org.apache.hadoop.mapred

Examples of org.apache.hadoop.mapred.Counters$Group
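Counters in the old mapred API are organized into named groups: each Counters object is iterable over its Counters.Group instances, and each group is iterable over its Counters.Counter entries, as the examples below demonstrate. A minimal sketch of walking all counters of a finished job (the class and method names here are illustrative, not taken from any of the projects below):

    import java.io.IOException;

    import org.apache.hadoop.mapred.Counters;
    import org.apache.hadoop.mapred.RunningJob;

    public class CounterDump {
        // Prints every counter group and every counter of a completed job.
        static void dump(RunningJob job) throws IOException {
            Counters counters = job.getCounters();
            for (Counters.Group group : counters) {
                System.out.println(group.getDisplayName());
                for (Counters.Counter counter : group) {
                    System.out.println("  " + counter.getDisplayName()
                            + " = " + counter.getValue());
                }
            }
        }
    }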


      // Excerpt: save the current thread context classloader (TCCL) so it
      // can be restored afterwards (the enclosing method declaration is
      // elided in this excerpt).
      ClassLoader cl = Thread.currentThread().getContextClassLoader();
      try {
        // Set the system classloader as the TCCL.
        Thread.currentThread().setContextClassLoader(ClassLoader.getSystemClassLoader());

        // Work around org.apache.hadoop.mapred.Counters#MAX_COUNTER_LIMIT:
        // construct a Counters instance so the class is initialized under
        // the system classloader (class loading is lazy).
        new Counters();
      } finally {
        // Always restore the original TCCL.
        Thread.currentThread().setContextClassLoader(cl);
      }
    }
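The snippet above pins the Counters class initialization to the system classloader and restores the previous TCCL in a finally block. If the pattern is needed in more than one place, it can be factored out; a small sketch, with a hypothetical helper name:

    // Hypothetical helper: runs an action with the system classloader as the
    // TCCL and restores the previous classloader afterwards.
    static void withSystemClassLoader(Runnable action) {
        ClassLoader previous = Thread.currentThread().getContextClassLoader();
        try {
            Thread.currentThread().setContextClassLoader(ClassLoader.getSystemClassLoader());
            action.run();
        } finally {
            Thread.currentThread().setContextClassLoader(previous);
        }
    }

    // Usage, forcing Counters to initialize under the system classloader:
    // withSystemClassLoader(new Runnable() {
    //     public void run() { new Counters(); }
    // });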


            // (the beginning of this log statement is elided in the excerpt)
                        + ", save keys: " + saveKeys + ", reducerPerBucket: " + reducerPerBucket);
            logger.info("Building store...");
            RunningJob job = JobClient.runJob(conf);

            // Once the job has completed log the counter
            Counters counters = job.getCounters();

            if(saveKeys) {
                // The same collision counters are logged whether or not
                // reducerPerBucket is set, so a single branch covers both cases.
                logger.info("Number of collisions in the job - "
                            + counters.getCounter(KeyValueWriter.CollisionCounter.NUM_COLLISIONS));
                logger.info("Maximum number of collisions for one entry - "
                            + counters.getCounter(KeyValueWriter.CollisionCounter.MAX_COLLISIONS));
            }

            // Do a CheckSumOfCheckSum - Similar to HDFS
            CheckSum checkSumGenerator = CheckSum.getInstance(this.checkSumType);
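The CollisionCounter values read above are enum-addressed counters; on the task side they are incremented through the Reporter, and Hadoop files them under a group named after the enum class. A sketch of the producing side with a hypothetical enum and mapper (not the actual Voldemort classes):

    import java.io.IOException;

    import org.apache.hadoop.io.LongWritable;
    import org.apache.hadoop.io.Text;
    import org.apache.hadoop.mapred.MapReduceBase;
    import org.apache.hadoop.mapred.Mapper;
    import org.apache.hadoop.mapred.OutputCollector;
    import org.apache.hadoop.mapred.Reporter;

    public class CountingMapper extends MapReduceBase
            implements Mapper<LongWritable, Text, Text, LongWritable> {

        // Hypothetical counter enum; Hadoop reports it under a group named
        // after this enum class.
        public enum MyCounter { RECORDS_SEEN }

        public void map(LongWritable key, Text value,
                        OutputCollector<Text, LongWritable> out, Reporter reporter)
                throws IOException {
            // Bump the enum-addressed counter once per input record.
            reporter.incrCounter(MyCounter.RECORDS_SEEN, 1);
            out.collect(value, key);
        }
    }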

  public void testCommandLine() throws Exception {
    super.testCommandLine();
    // validate combiner counters
    String counterGrp = "org.apache.hadoop.mapred.Task$Counter";
    Counters counters = job.running_.getCounters();
    assertTrue(counters.findCounter(
               counterGrp, "COMBINE_INPUT_RECORDS").getValue() != 0);
    assertTrue(counters.findCounter(
               counterGrp, "COMBINE_OUTPUT_RECORDS").getValue() != 0);
  }
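findCounter(group, name) resolves counters by plain strings, which is also how user-defined counters are typically created inside tasks. A minimal sketch of both sides (the group and counter names are illustrative):

    import java.io.IOException;

    import org.apache.hadoop.mapred.Counters;
    import org.apache.hadoop.mapred.Reporter;
    import org.apache.hadoop.mapred.RunningJob;

    public class NamedCounters {
        // Task side: create or bump a custom counter by group and name.
        static void bump(Reporter reporter) {
            reporter.incrCounter("MyApp", "RECORDS_SEEN", 1);
        }

        // Client side, after the job completes: read the counter back.
        static long recordsSeen(RunningJob job) throws IOException {
            return job.getCounters().findCounter("MyApp", "RECORDS_SEEN").getValue();
        }
    }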

  // Convenience constructor (signature reconstructed; it is elided in the
  // excerpt): takes the Mapper under test.
  public MapDriver(final Mapper<K1, V1, K2, V2> m) {
    this();
    setMapper(m);
  }

  public MapDriver() {
    // Each driver starts with a fresh, empty Counters instance.
    setCounters(new Counters());
  }
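Because MapDriver seeds every test with an empty Counters instance, counters bumped by the mapper under test can be asserted after the run. A hedged usage sketch, assuming the hypothetical CountingMapper from the earlier example, which increments MyCounter.RECORDS_SEEN once per record:

    public void testCountersAreExposed() {
        MapDriver<LongWritable, Text, Text, LongWritable> driver =
                new MapDriver<LongWritable, Text, Text, LongWritable>(new CountingMapper());
        driver.withInput(new LongWritable(0), new Text("row"))
              .withOutput(new Text("row"), new LongWritable(0))
              .runTest();
        // The driver's Counters captured the increment made via the Reporter.
        assertEquals(1, driver.getCounters()
                .findCounter(CountingMapper.MyCounter.RECORDS_SEEN).getValue());
    }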

        return sb.toString();
    }

    @SuppressWarnings("deprecation")
    void addCounters(RunningJob rjob) {
        Counters counters = null;
        if (rjob != null) {
            try {
                counters = rjob.getCounters();
            } catch (IOException e) {
                LOG.warn("Unable to get job counters", e);
            }
        }
        if (counters != null) {
            Counters.Group taskgroup = counters
                    .getGroup(PigStatsUtil.TASK_COUNTER_GROUP);
            Counters.Group hdfsgroup = counters
                    .getGroup(PigStatsUtil.FS_COUNTER_GROUP);
            Counters.Group multistoregroup = counters
                    .getGroup(PigStatsUtil.MULTI_STORE_COUNTER_GROUP);
            Counters.Group multiloadgroup = counters
                    .getGroup(PigStatsUtil.MULTI_INPUTS_COUNTER_GROUP);

            mapInputRecords = taskgroup.getCounterForName(
                    PigStatsUtil.MAP_INPUT_RECORDS).getCounter();
            mapOutputRecords = taskgroup.getCounterForName(
                    PigStatsUtil.MAP_OUTPUT_RECORDS).getCounter();
            reduceInputRecords = taskgroup.getCounterForName(
                    PigStatsUtil.REDUCE_INPUT_RECORDS).getCounter();
            reduceOutputRecords = taskgroup.getCounterForName(
                    PigStatsUtil.REDUCE_OUTPUT_RECORDS).getCounter();
            hdfsBytesRead = hdfsgroup.getCounterForName(
                    PigStatsUtil.HDFS_BYTES_READ).getCounter();     
            hdfsBytesWritten = hdfsgroup.getCounterForName(
                    PigStatsUtil.HDFS_BYTES_WRITTEN).getCounter();           
            spillCount = counters.findCounter(
                    PigCounters.SPILLABLE_MEMORY_MANAGER_SPILL_COUNT)
                    .getCounter();
            activeSpillCountObj = counters.findCounter(
                    PigCounters.PROACTIVE_SPILL_COUNT_BAGS).getCounter();
            activeSpillCountRecs = counters.findCounter(
                    PigCounters.PROACTIVE_SPILL_COUNT_RECS).getCounter();

            Iterator<Counter> iter = multistoregroup.iterator();
            while (iter.hasNext()) {
                Counter cter = iter.next();
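A detail worth knowing for code like the above: getGroup and getCounterForName are find-or-create lookups, so a missing group or counter yields a zero-valued counter rather than null, and absent statistics simply read as 0. A tiny illustration:

    import org.apache.hadoop.mapred.Counters;

    public class FindOrCreateDemo {
        public static void main(String[] args) {
            Counters counters = new Counters();
            // Neither the group nor the counter exists yet; both are created
            // empty by the lookup, so this prints 0 instead of throwing.
            long missing = counters.getGroup("no.such.Group")
                                   .getCounterForName("NO_SUCH_COUNTER").getCounter();
            System.out.println(missing);
        }
    }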

    /**
     * Parse and add the job counters.
     */
    @SuppressWarnings("deprecation")
    private static void parseAndAddJobCounters(Map<String, String> job, String counters) {
        try {
            Counters counterGroups = Counters.fromEscapedCompactString(counters);
            for (Group otherGroup : counterGroups) {
                Group group = counterGroups.getGroup(otherGroup.getName());
                for (Counter otherCounter : otherGroup) {
                    Counter counter = group.getCounterForName(otherCounter.getName());
                    job.put(otherCounter.getName(), String.valueOf(counter.getValue()));
                }
            }
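fromEscapedCompactString parses the single-string form produced by Counters.makeEscapedCompactString, the encoding used in job history files. A minimal round-trip sketch (group and counter names are illustrative):

    import org.apache.hadoop.mapred.Counters;

    public class CompactStringRoundTrip {
        public static void main(String[] args) throws Exception {
            Counters original = new Counters();
            original.incrCounter("MyApp", "RECORDS_SEEN", 42);

            // Serialize to the escaped compact representation, then parse it
            // back into an equivalent Counters object.
            String compact = original.makeEscapedCompactString();
            Counters parsed = Counters.fromEscapedCompactString(compact);

            System.out.println(parsed.findCounter("MyApp", "RECORDS_SEEN").getValue());
        }
    }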

                    case FINISH_TIME:
                        endTime = Long.valueOf(val);
                        break;                   
                    case COUNTERS: {
                        try {
                            Counters counters = Counters.fromEscapedCompactString(val);
                            long rows = counters.getGroup(TASK_COUNTER_GROUP)
                                    .getCounterForName(MAP_INPUT_RECORDS).getCounter();
                            if (rows < minMapRows) minMapRows = rows;
                            if (rows > maxMapRows) maxMapRows = rows;
                        } catch (ParseException e) {
                            LOG.warn("Failed to parse job counters", e);
                        }
                    }
                    break;
                    default:
                        LOG.warn("JobHistory.Keys." + key
                                + " : NOT INCLUDED IN PERFORMANCE ADVISOR MAP COUNTERS");
                        break;
                    }
                }
                duration = endTime - startTime;
                if (minMapTime > duration) minMapTime = duration;
                if (maxMapTime < duration) maxMapTime = duration;
                totalMapTime += duration;       
            } else if (task.get(Keys.TASK_TYPE).equals("REDUCE")) {
                Map<JobHistory.Keys, String> reduceTask = task.getValues();
                Map<JobHistory.Keys, String> successTaskAttemptMap  =  getLastSuccessfulTaskAttempt(task);
                // NOTE: this can result in fewer tasks being collected in the task list array
                if (successTaskAttemptMap != null) {
                    reduceTask.putAll(successTaskAttemptMap);
                } else {
                    LOG.warn("Task:<" + task.get(Keys.TASKID) + "> is not successful - SKIPPING");
                }
                long duration = 0;
                long startTime = 0;
                long endTime = 0;
                int size = reduceTask.size();
                numberReduces++;

                Iterator<Map.Entry<JobHistory.Keys, String>> kv = reduceTask.entrySet().iterator();
                for (int j = 0; j < size; j++) {
                    Map.Entry<JobHistory.Keys, String> rtc = kv.next();
                    JobHistory.Keys key = rtc.getKey();
                    String val = rtc.getValue();
                    switch (key) {
                    case START_TIME:
                        startTime = Long.valueOf(val);
                        break;
                    case FINISH_TIME:
                        endTime = Long.valueOf(val);
                        break;
                    case COUNTERS: {
                        try {
                            Counters counters = Counters.fromEscapedCompactString(val);
                            long rows = counters.getGroup(TASK_COUNTER_GROUP)
                                    .getCounterForName(REDUCE_INPUT_RECORDS).getCounter();
                            if (rows < minReduceRows) minReduceRows = rows;
                            if (rows > maxReduceRows) maxReduceRows = rows;
                        } catch (ParseException e) {
                            LOG.warn("Failed to parse job counters", e);

        JobID mapRedJobID = job.getAssignedJobID();
        RunningJob runningJob = null;
        try {
            runningJob = jobClient.getJob(mapRedJobID);
            if(runningJob != null) {
                Counters counters = runningJob.getCounters();
                if (counters == null) {
                    Long previous = aggMap.get(PigWarning.NULL_COUNTER_COUNT);
                    long nullCounterCount = (previous == null) ? 0 : previous;
                    nullCounterCount++;
                    aggMap.put(PigWarning.NULL_COUNTER_COUNT, nullCounterCount);
                }
                for (Enum e : PigWarning.values()) {
                    if (e != PigWarning.NULL_COUNTER_COUNT) {
                        Long currentCount = aggMap.get(e);
                        currentCount = (currentCount == null ? 0 : currentCount);
                        // If the counters object is null, report to the user
                        // that the warning aggregation counts may not be
                        // correct. Counters should never be null; a null value
                        // is a Hadoop bug, and once that bug is fixed this
                        // null-handling path should never be hit. See PIG-943.
                        if (counters != null)
                            currentCount += counters.getCounter(e);
                        aggMap.put(e, currentCount);
                    }
                }
            }
        } catch (IOException ioe) {
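The loop above is a counter merge: for each warning enum, the job's counter value is added into a running total in aggMap, with absent entries treated as zero and a null Counters object contributing nothing. The same idiom, restated compactly under the same assumptions:

    for (PigWarning w : PigWarning.values()) {
        if (w == PigWarning.NULL_COUNTER_COUNT) continue;
        // Absent map entries count as zero; a null Counters adds nothing.
        long previous = aggMap.containsKey(w) ? aggMap.get(w) : 0L;
        long increment = (counters == null) ? 0L : counters.getCounter(w);
        aggMap.put(w, previous + increment);
    }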


                if (!runningJob.getJobName().startsWith("oozie:action:")) {
                    throw new ActionExecutorException(ActionExecutorException.ErrorType.FAILED, "MR001",
                                                      "ID swap should have happened in launcher job [{0}]", action.getExternalId());
                }

                Counters counters = runningJob.getCounters();
                if (counters != null) {
                    ActionStats stats = new MRStats(counters);
                    String statsJsonString = stats.toJSON();
                    context.setVar(HADOOP_COUNTERS, statsJsonString);
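MRStats is Oozie's own wrapper for turning counters into JSON. Independent of Oozie, a Counters object can be flattened into nested maps that any JSON library can serialize; a hedged sketch:

    import java.util.LinkedHashMap;
    import java.util.Map;

    import org.apache.hadoop.mapred.Counters;

    public class CountersToMap {
        // Flattens Counters into group -> (counter name -> value), a shape
        // most JSON serializers can emit directly.
        static Map<String, Map<String, Long>> toMap(Counters counters) {
            Map<String, Map<String, Long>> result =
                    new LinkedHashMap<String, Map<String, Long>>();
            for (Counters.Group group : counters) {
                Map<String, Long> values = new LinkedHashMap<String, Long>();
                for (Counters.Counter counter : group) {
                    values.put(counter.getName(), counter.getValue());
                }
                result.put(group.getName(), values);
            }
            return result;
        }
    }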
