Package org.apache.hadoop.mapreduce

Examples of org.apache.hadoop.mapreduce.Counters


  private void updateProgressSplits() {
    double newProgress = reportedStatus.progress;
    Counters counters = reportedStatus.counters;
    if (counters == null)
      return;

    WrappedProgressSplitsBlock splitsBlock = getProgressSplitBlock();
    if (splitsBlock != null) {
      long now = clock.getTime();
      long start = getLaunchTime(); // TODO Ensure not 0

      if (start != 0 && now - start <= Integer.MAX_VALUE) {
        splitsBlock.getProgressWallclockTime().extend(newProgress,
            (int) (now - start));
      }

      Counter cpuCounter = counters.findCounter(TaskCounter.CPU_MILLISECONDS);
      if (cpuCounter != null && cpuCounter.getValue() <= Integer.MAX_VALUE) {
        splitsBlock.getProgressCPUTime().extend(newProgress,
            (int) cpuCounter.getValue()); // long to int? TODO: FIX. Same below
      }

      Counter virtualBytes = counters
        .findCounter(TaskCounter.VIRTUAL_MEMORY_BYTES);
      if (virtualBytes != null) {
        splitsBlock.getProgressVirtualMemoryKbytes().extend(newProgress,
            (int) (virtualBytes.getValue() / (MEMORY_SPLITS_RESOLUTION)));
      }

      Counter physicalBytes = counters
        .findCounter(TaskCounter.PHYSICAL_MEMORY_BYTES);
      if (physicalBytes != null) {
        splitsBlock.getProgressPhysicalMemoryKbytes().extend(newProgress,
            (int) (physicalBytes.getValue() / (MEMORY_SPLITS_RESOLUTION)));
      }
    }
  }
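The raw long-to-int casts flagged by the TODO above silently truncate large counter values. A minimal sketch of a safer conversion, assuming the int-typed extend() signature must stay (the helper name toIntClamped is hypothetical):

  // Hypothetical helper: clamp a long counter value into int range
  // before handing it to extend(), instead of truncating with a cast.
  private static int toIntClamped(long value) {
    return (int) Math.max(Integer.MIN_VALUE,
        Math.min(Integer.MAX_VALUE, value));
  }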


  private void initTaskAttemptStatus(TaskAttemptStatus result) {
    result.progress = 0.0f;
    result.phase = Phase.STARTING;
    result.stateString = "NEW";
    result.taskState = TaskAttemptState.NEW;
    Counters counters = EMPTY_COUNTERS;
    //    counters.groups = new HashMap<String, CounterGroup>();
    result.counters = counters;
  }
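initTaskAttemptStatus seeds a fresh attempt with the shared EMPTY_COUNTERS instance. For reference, a minimal sketch of creating and populating an org.apache.hadoop.mapreduce.Counters directly (the group and counter names are illustrative):

  Counters counters = new Counters();
  Counter records = counters.findCounter("MyGroup", "RECORDS_SEEN");
  records.increment(1);            // bump by one
  long seen = records.getValue();  // read it back: 1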

      for (int i = 0 ; i < 100 ; i++) {
        queueEvent(jheh, new JobHistoryEvent(t.jobId, new TaskFinishedEvent(
            t.taskID, 0, TaskType.MAP, "", null)));
      }
      queueEvent(jheh, new JobHistoryEvent(t.jobId, new JobFinishedEvent(
          TypeConverter.fromYarn(t.jobId), 0, 10, 10, 0, 0, null, null, new Counters())));

      handleNextNEvents(jheh, 29);
      verify(mockWriter, times(0)).flush();

      handleNextNEvents(jheh, 72);

    report.setCounters(TypeConverter.toYarn(newCounters()));
    return report;
  }

  public static Counters newCounters() {
    Counters hc = new Counters();
    for (JobCounter c : JobCounter.values()) {
      hc.findCounter(c).setValue((long) (Math.random() * 1000));
    }
    for (TaskCounter c : TaskCounter.values()) {
      hc.findCounter(c).setValue((long) (Math.random() * 1000));
    }
    int nc = FileSystemCounter.values().length * 4;
    for (int i = 0; i < nc; ++i) {
      for (FileSystemCounter c : FileSystemCounter.values()) {
        hc.findCounter(FS_SCHEMES.next(), c).setValue(
            (long) (Math.random() * DT));
      }
    }
    for (int i = 0; i < 2 * 3; ++i) {
      hc.findCounter(USER_COUNTER_GROUPS.next(), USER_COUNTERS.next())
          .setValue((long) (Math.random() * 100000));
    }
    return hc;
  }
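newCounters() fills framework, file-system, and user counter groups with random values. Since Counters is Iterable over its groups and each CounterGroup is Iterable over its counters, the whole result can be dumped with two nested loops; a short sketch:

  Counters hc = newCounters();
  for (CounterGroup group : hc) {
    for (Counter counter : group) {
      System.out.println(group.getName() + "." + counter.getName()
          + " = " + counter.getValue());
    }
  }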

      @Override
      public Counters getCounters() {
        if (report != null && report.getCounters() != null) {
          return new Counters(TypeConverter.fromYarn(report.getCounters()));
        }
        return null;
      }


      @Override
      public Counters getCounters() {
        return new Counters(
          TypeConverter.fromYarn(report.getCounters()));
      }

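Both overrides above rebuild a Counters object from the YARN-layer representation via TypeConverter.fromYarn; only the first guards against a missing report. A condensed sketch of the null-safe variant (getCountersSafely is a hypothetical name):

  Counters getCountersSafely(JobReport report) {
    if (report == null || report.getCounters() == null) {
      return null; // counters may not be available yet
    }
    return new Counters(TypeConverter.fromYarn(report.getCounters()));
  }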


  public static Counters getCounters(
      Collection<Task> tasks) {
    Counters counters = new Counters();
    return JobImpl.incrTaskCounters(counters, tasks);
  }
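getCounters delegates the aggregation to JobImpl.incrTaskCounters, which folds each task's counters into the accumulator with incrAllCounters. A hedged sketch of that fold, assuming Task.getCounters() returns the task's Counters:

  public static Counters incrTaskCounters(Counters counters,
      Collection<Task> tasks) {
    for (Task task : tasks) {
      counters.incrAllCounters(task.getCounters());
    }
    return counters;
  }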

    final JobId id = newJobID(appID, i);
    final String name = newJobName();
    final JobReport report = newJobReport(id);
    final Map<TaskId, Task> tasks = newTasks(id, n, m);
    final TaskCount taskCount = getTaskCount(tasks.values());
    final Counters counters = getCounters(tasks
      .values());
    final Path configFile = confFile;

    Map<JobACL, AccessControlList> tmpJobACLs = new HashMap<JobACL, AccessControlList>();
    Configuration conf = new Configuration();

    /*----------------------------------------------------------------------
     * Test that no speculation happens if MAP_SPECULATIVE and
     * REDUCE_SPECULATIVE are both false.
     * ---------------------------------------------------------------------
     */
    Job job = runSpecTest(false, false);

    boolean succeeded = job.waitForCompletion(true);
    Assert.assertTrue(succeeded);
    Assert.assertEquals(JobStatus.State.SUCCEEDED, job.getJobState());
    Counters counters = job.getCounters();
    Assert.assertEquals(2, counters.findCounter(JobCounter.TOTAL_LAUNCHED_MAPS)
            .getValue());
    Assert.assertEquals(2, counters.findCounter(JobCounter.TOTAL_LAUNCHED_REDUCES)
            .getValue());
    Assert.assertEquals(0, counters.findCounter(JobCounter.NUM_FAILED_MAPS)
            .getValue());

    /*----------------------------------------------------------------------
     * Test that Mapper speculates if MAP_SPECULATIVE is true and
     * REDUCE_SPECULATIVE is false.
     * ---------------------------------------------------------------------
     */
    job = runSpecTest(true, false);

    succeeded = job.waitForCompletion(true);
    Assert.assertTrue(succeeded);
    Assert.assertEquals(JobStatus.State.SUCCEEDED, job.getJobState());
    counters = job.getCounters();

    // The long-running map will be killed and a new one started.
    Assert.assertEquals(3, counters.findCounter(JobCounter.TOTAL_LAUNCHED_MAPS)
            .getValue());
    Assert.assertEquals(2, counters.findCounter(JobCounter.TOTAL_LAUNCHED_REDUCES)
            .getValue());
    Assert.assertEquals(1, counters.findCounter(JobCounter.NUM_FAILED_MAPS)
            .getValue());

    /*----------------------------------------------------------------------
     * Test that Reducer speculates if REDUCE_SPECULATIVE is true and
     * MAP_SPECULATIVE is false.
     * ---------------------------------------------------------------------
     */
    job = runSpecTest(false, true);

    succeeded = job.waitForCompletion(true);
    Assert.assertTrue(succeeded);
    Assert.assertEquals(JobStatus.State.SUCCEEDED, job.getJobState());
    counters = job.getCounters();

    // The long-running reduce will be killed and a new one started.
    Assert.assertEquals(2, counters.findCounter(JobCounter.TOTAL_LAUNCHED_MAPS)
            .getValue());
    Assert.assertEquals(3, counters.findCounter(JobCounter.TOTAL_LAUNCHED_REDUCES)
            .getValue());
  }

      SqoopHCatUtilities.instance().invokeOutputCommitterForLocalMode(job);
    }

    perfCounters.stopClock();

    Counters jobCounters = job.getCounters();
    // If the job has been retired, these may be unavailable.
    if (null == jobCounters) {
      displayRetiredJobNotice(LOG);
    } else {
      perfCounters.addBytes(jobCounters.getGroup("FileSystemCounters")
        .findCounter("HDFS_BYTES_WRITTEN").getValue());
      LOG.info("Transferred " + perfCounters.toString());
      long numRecords = ConfigurationHelper.getNumMapOutputRecords(job);
      LOG.info("Retrieved " + numRecords + " records.");
    }
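The lookup above uses the legacy string names for the file-system counter group. On releases that ship the FileSystemCounter enum, the same value can be read without magic strings; a sketch, assuming "hdfs" is the scheme of interest:

  long written = jobCounters
      .findCounter("hdfs", FileSystemCounter.BYTES_WRITTEN)
      .getValue();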
