Examples of JobEvent


Examples of org.apache.hadoop.mapreduce.v2.app.job.event.JobEvent


  private static StubbedJob createRunningStubbedJob(Configuration conf,
      Dispatcher dispatcher, int numSplits, AppContext appContext) {
    StubbedJob job = createStubbedJob(conf, dispatcher, numSplits, appContext);
    job.handle(new JobEvent(job.getID(), JobEventType.JOB_INIT));
    assertJobState(job, JobStateInternal.INITED);
    job.handle(new JobStartEvent(job.getID()));
    assertJobState(job, JobStateInternal.RUNNING);
    return job;
  }
View Full Code Here
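The test above walks a stubbed job from NEW to RUNNING by handing JobEvents straight to the job. A minimal sketch of that two-step lifecycle, assuming only that the hadoop-mapreduce-client-app classes are on the classpath and that the caller already has a JobId and an EventHandler<JobEvent> (for example a JobImpl); the class and method names below are illustrative, not part of the snippet:

import org.apache.hadoop.mapreduce.v2.api.records.JobId;
import org.apache.hadoop.mapreduce.v2.app.job.event.JobEvent;
import org.apache.hadoop.mapreduce.v2.app.job.event.JobEventType;
import org.apache.hadoop.mapreduce.v2.app.job.event.JobStartEvent;
import org.apache.hadoop.yarn.event.EventHandler;

public class JobLifecycleSketch {
  /** Pushes a job through JOB_INIT and then JOB_START (JobStartEvent is a JobEvent subclass). */
  public static void initAndStart(EventHandler<JobEvent> jobHandler, JobId jobId) {
    // JOB_INIT builds the job's internal state (tasks etc.) but does not run anything.
    jobHandler.handle(new JobEvent(jobId, JobEventType.JOB_INIT));
    // JOB_START moves the job from INITED to RUNNING and kicks off scheduling.
    jobHandler.handle(new JobStartEvent(jobId));
  }
}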

Examples of org.apache.hadoop.mapreduce.v2.app.job.event.JobEvent

    // It's more test friendly to put it here.
    DefaultMetricsSystem.initialize("MRAppMaster");

    if (!errorHappenedShutDown) {
      // create a job event for job initialization
      JobEvent initJobEvent = new JobEvent(job.getID(), JobEventType.JOB_INIT);
      // Send init to the job (this does NOT trigger job execution)
      // This is a synchronous call, not an event through dispatcher. We want
      // job-init to be done completely here.
      jobEventDispatcher.handle(initJobEvent);
View Full Code Here
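The comment in this snippet is the important part: JOB_INIT is delivered with a direct, synchronous handle() call, while most other JobEvents go through the AM's AsyncDispatcher and are processed on a separate thread. A rough sketch of the two delivery paths, assuming a JobId is already in hand; the class name and the println handler are purely illustrative:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.mapreduce.v2.api.records.JobId;
import org.apache.hadoop.mapreduce.v2.app.job.event.JobEvent;
import org.apache.hadoop.mapreduce.v2.app.job.event.JobEventType;
import org.apache.hadoop.yarn.event.AsyncDispatcher;
import org.apache.hadoop.yarn.event.EventHandler;

public class DispatchSketch {
  public static void show(JobId jobId) {
    EventHandler<JobEvent> jobHandler = new EventHandler<JobEvent>() {
      @Override
      public void handle(JobEvent event) {
        System.out.println("handling " + event.getType());
      }
    };

    // Synchronous path (what the AM does for JOB_INIT): the caller's thread
    // runs the handler to completion before the next line executes.
    jobHandler.handle(new JobEvent(jobId, JobEventType.JOB_INIT));

    // Asynchronous path: the event is queued and later handled on the
    // dispatcher's own thread.
    AsyncDispatcher dispatcher = new AsyncDispatcher();
    dispatcher.register(JobEventType.class, jobHandler);
    dispatcher.init(new Configuration());
    dispatcher.start();
    dispatcher.getEventHandler().handle(new JobEvent(jobId, JobEventType.JOB_INIT));
    dispatcher.stop();
  }
}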

Examples of org.apache.hadoop.mapreduce.v2.app.job.event.JobEvent

   * of the jobs (the "small" ones), which is awkward with the current design.
   */
  @SuppressWarnings("unchecked")
  protected void startJobs() {
    /** create a job-start event to get this ball rolling */
    JobEvent startJobEvent = new JobStartEvent(job.getID(),
        recoveredJobStartTime);
    /** send the job-start event. this triggers the job execution. */
    dispatcher.getEventHandler().handle(startJobEvent);
  }
View Full Code Here
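JobStartEvent has two constructors: one that only names the job (used in the test snippet further up) and one that also carries a recovered start time, so a restarted AM can preserve the job's original start timestamp. A small illustrative helper; treating a non-positive value as "no recovered time" is an assumption of this sketch, not of MRAppMaster:

import org.apache.hadoop.mapreduce.v2.api.records.JobId;
import org.apache.hadoop.mapreduce.v2.app.job.event.JobStartEvent;

public class JobStartSketch {
  /** Builds the job-start event; a non-positive recoveredStartTime means "not recovered" here. */
  public static JobStartEvent startEventFor(JobId jobId, long recoveredStartTime) {
    return recoveredStartTime > 0
        ? new JobStartEvent(jobId, recoveredStartTime) // restarted AM: keep the original start time
        : new JobStartEvent(jobId);                    // fresh job: the job picks its own start time
  }
}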

Examples of org.apache.hadoop.mapreduce.v2.app.job.event.JobEvent

    public boolean internalError;

    @Override
    public void handle(Event event) {
      if (event instanceof JobEvent) {
        JobEvent je = ((JobEvent) event);
        if (JobEventType.INTERNAL_ERROR == je.getType()) {
          internalError = true;
        }
      }
    }
View Full Code Here
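This handler only remembers whether an INTERNAL_ERROR ever arrived. A slightly more general test double in the same spirit records every JobEventType it sees; the class below is hypothetical:

import java.util.EnumSet;
import org.apache.hadoop.mapreduce.v2.app.job.event.JobEvent;
import org.apache.hadoop.mapreduce.v2.app.job.event.JobEventType;
import org.apache.hadoop.yarn.event.Event;
import org.apache.hadoop.yarn.event.EventHandler;

/** Test double that records which JobEventTypes passed through it. */
public class RecordingJobEventHandler implements EventHandler<Event> {
  private final EnumSet<JobEventType> seen = EnumSet.noneOf(JobEventType.class);

  @Override
  public void handle(Event event) {
    if (event instanceof JobEvent) {
      seen.add(((JobEvent) event).getType());
    }
  }

  public boolean sawInternalError() {
    return seen.contains(JobEventType.INTERNAL_ERROR);
  }
}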

Examples of org.apache.hadoop.mapreduce.v2.app.job.event.JobEvent

    @SuppressWarnings("unchecked")
    @Override
    protected void attemptLaunched(TaskAttemptId attemptID) {
      if (attemptID.getTaskId().getId() == 0) {
        getContext().getEventHandler().handle(
            new JobEvent(attemptID.getTaskId().getJobId(),
                JobEventType.JOB_KILL));
      } else {
        getContext().getEventHandler().handle(
            new TaskAttemptEvent(attemptID, TaskAttemptEventType.TA_DONE));
      }
View Full Code Here
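The detail worth noting here is how the job is addressed from an attempt: the ids form a hierarchy (TaskAttemptId -> TaskId -> JobId), so any attempt can name its owning job. A tiny sketch of building a JOB_KILL event from an attempt id; the helper name is illustrative:

import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId;
import org.apache.hadoop.mapreduce.v2.app.job.event.JobEvent;
import org.apache.hadoop.mapreduce.v2.app.job.event.JobEventType;

public class KillFromAttemptSketch {
  /** Builds a JOB_KILL event addressed to the job that owns the given attempt. */
  public static JobEvent killOwningJob(TaskAttemptId attemptId) {
    return new JobEvent(attemptId.getTaskId().getJobId(), JobEventType.JOB_KILL);
  }
}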

Examples of org.apache.hadoop.mapreduce.v2.app.job.event.JobEvent

            handleEvent(event);
          } catch (Throwable t) {
            LOG.error("Error in handling event type " + event.getType()
                + " to the ContainreAllocator", t);
            // Kill the AM
            eventHandler.handle(new JobEvent(getJob().getID(),
              JobEventType.INTERNAL_ERROR));
            return;
          }
        }
      }
View Full Code Here
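The pattern here is to catch Throwable around per-event handling and escalate to JobEventType.INTERNAL_ERROR, so the AM fails fast instead of silently losing its allocator thread. The same idea can be written as a small handler decorator; the wrapper class below is hypothetical:

import org.apache.hadoop.mapreduce.v2.api.records.JobId;
import org.apache.hadoop.mapreduce.v2.app.job.event.JobEvent;
import org.apache.hadoop.mapreduce.v2.app.job.event.JobEventType;
import org.apache.hadoop.yarn.event.Event;
import org.apache.hadoop.yarn.event.EventHandler;

/** Wraps a delegate handler; anything it throws is escalated to an INTERNAL_ERROR JobEvent. */
public class EscalatingHandler<T extends Event> implements EventHandler<T> {
  private final EventHandler<T> delegate;
  private final EventHandler<Event> eventHandler;  // e.g. the dispatcher's generic handler
  private final JobId jobId;

  public EscalatingHandler(EventHandler<T> delegate,
      EventHandler<Event> eventHandler, JobId jobId) {
    this.delegate = delegate;
    this.eventHandler = eventHandler;
    this.jobId = jobId;
  }

  @Override
  public void handle(T event) {
    try {
      delegate.handle(event);
    } catch (Throwable t) {
      // A real handler would also log t, as the allocator above does.
      eventHandler.handle(new JobEvent(jobId, JobEventType.INTERNAL_ERROR));
    }
  }
}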

Examples of org.apache.hadoop.mapreduce.v2.app.job.event.JobEvent

            "max container capability in the cluster. Killing the Job. mapResourceRequest: " +
                mapResourceRequest + " maxContainerCapability:" + supportedMaxContainerCapability;
            LOG.info(diagMsg);
            eventHandler.handle(new JobDiagnosticsUpdateEvent(
                jobId, diagMsg));
            eventHandler.handle(new JobEvent(jobId, JobEventType.JOB_KILL));
          }
        }
        //set the rounded off memory
        reqEvent.getCapability().setMemory(mapResourceRequest);
        scheduledRequests.addMap(reqEvent);//maps are immediately scheduled
      } else {
        if (reduceResourceRequest == 0) {
          reduceResourceRequest = reqEvent.getCapability().getMemory();
          eventHandler.handle(new JobHistoryEvent(jobId,
              new NormalizedResourceEvent(
                  org.apache.hadoop.mapreduce.TaskType.REDUCE,
                  reduceResourceRequest)));
          LOG.info("reduceResourceRequest:"+ reduceResourceRequest);
          if (reduceResourceRequest > supportedMaxContainerCapability) {
            String diagMsg = "REDUCE capability required is more than the " +
                "supported max container capability in the cluster. Killing the " +
                "Job. reduceResourceRequest: " + reduceResourceRequest +
                " maxContainerCapability:" + supportedMaxContainerCapability;
            LOG.info(diagMsg);
            eventHandler.handle(new JobDiagnosticsUpdateEvent(
                jobId, diagMsg));
            eventHandler.handle(new JobEvent(jobId, JobEventType.JOB_KILL));
          }
        }
        //set the rounded off memory
        reqEvent.getCapability().setMemory(reduceResourceRequest);
        if (reqEvent.getEarlierAttemptFailed()) {
View Full Code Here
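When the allocator kills a job it sends two events back to back: a JobDiagnosticsUpdateEvent carrying a human-readable reason, then the JOB_KILL itself, so the reason ends up in the job's diagnostics. A compact sketch of that pairing; the helper is illustrative:

import org.apache.hadoop.mapreduce.v2.api.records.JobId;
import org.apache.hadoop.mapreduce.v2.app.job.event.JobDiagnosticsUpdateEvent;
import org.apache.hadoop.mapreduce.v2.app.job.event.JobEvent;
import org.apache.hadoop.mapreduce.v2.app.job.event.JobEventType;
import org.apache.hadoop.yarn.event.Event;
import org.apache.hadoop.yarn.event.EventHandler;

public class KillWithReasonSketch {
  /** Records a diagnostic message on the job and then asks for it to be killed. */
  public static void killJob(EventHandler<Event> eventHandler, JobId jobId, String reason) {
    eventHandler.handle(new JobDiagnosticsUpdateEvent(jobId, reason)); // why it is being killed
    eventHandler.handle(new JobEvent(jobId, JobEventType.JOB_KILL));   // the kill itself
  }
}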

Examples of org.apache.hadoop.mapreduce.v2.app.job.event.JobEvent

    } catch (Exception e) {
      // This can happen when the connection to the RM has gone down. Keep
      // re-trying until the retryInterval has expired.
      if (System.currentTimeMillis() - retrystartTime >= retryInterval) {
        LOG.error("Could not contact RM after " + retryInterval + " milliseconds.");
        eventHandler.handle(new JobEvent(this.getJob().getID(),
                                         JobEventType.INTERNAL_ERROR));
        throw new YarnRuntimeException("Could not contact RM after " +
                                retryInterval + " milliseconds.");
      }
      // Throw this up to the caller, which may decide to ignore it and
      // continue to attempt to contact the RM.
      throw e;
    }
    if (response.getAMCommand() != null) {
      switch(response.getAMCommand()) {
      case AM_RESYNC:
      case AM_SHUTDOWN:
        // This can happen if the RM has been restarted. If the RM is in that
        // state, this application must clean itself up.
        eventHandler.handle(new JobEvent(this.getJob().getID(),
                                         JobEventType.JOB_AM_REBOOT));
        throw new YarnRuntimeException("Resource Manager doesn't recognize AttemptId: " +
                                 this.getContext().getApplicationID());
      default:
        String msg =
View Full Code Here
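Two failure modes map to two different JobEventTypes here: not being able to reach the RM within the retry window becomes INTERNAL_ERROR, while an AM_RESYNC or AM_SHUTDOWN command from a restarted RM becomes JOB_AM_REBOOT so the job can clean itself up. A sketch of that mapping, assuming the AMCommand enum from org.apache.hadoop.yarn.api.records; the helper class is illustrative:

import org.apache.hadoop.mapreduce.v2.api.records.JobId;
import org.apache.hadoop.mapreduce.v2.app.job.event.JobEvent;
import org.apache.hadoop.mapreduce.v2.app.job.event.JobEventType;
import org.apache.hadoop.yarn.api.records.AMCommand;

public class RmFailureSketch {
  /** Chooses the job-level event that corresponds to an RM-side problem. */
  public static JobEvent eventFor(JobId jobId, AMCommand command, boolean retriesExhausted) {
    if (retriesExhausted) {
      // Could not reach the RM within the retry window: treat it as an internal error.
      return new JobEvent(jobId, JobEventType.INTERNAL_ERROR);
    }
    if (command == AMCommand.AM_RESYNC || command == AMCommand.AM_SHUTDOWN) {
      // RM restarted or no longer recognizes this attempt: the AM must reboot/clean up.
      return new JobEvent(jobId, JobEventType.JOB_AM_REBOOT);
    }
    return null; // no job-level action needed for other responses
  }
}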

Examples of org.apache.hadoop.mapreduce.v2.app.job.event.JobEvent

    DefaultMetricsSystem.initialize("MRAppMaster");

    boolean initFailed = false;
    if (!errorHappenedShutDown) {
      // create a job event for job initialization
      JobEvent initJobEvent = new JobEvent(job.getID(), JobEventType.JOB_INIT);
      // Send init to the job (this does NOT trigger job execution)
      // This is a synchronous call, not an event through dispatcher. We want
      // job-init to be done completely here.
      jobEventDispatcher.handle(initJobEvent);

      // If job is still not initialized, an error happened during
      // initialization. Must complete starting all of the services so failure
      // events can be processed.
      initFailed = (((JobImpl)job).getInternalState() != JobStateInternal.INITED);

      // JobImpl's InitTransition is done (call above is synchronous), so the
      // "uber-decision" (MR-1220) has been made.  Query job and switch to
      // ubermode if appropriate (by registering different container-allocator
      // and container-launcher services/event-handlers).

      if (job.isUber()) {
        speculatorEventDispatcher.disableSpeculation();
        LOG.info("MRAppMaster uberizing job " + job.getID()
            + " in local container (\"uber-AM\") on node "
            + nmHost + ":" + nmPort + ".");
      } else {
        // send init to speculator only for non-uber jobs.
        // This won't yet start as dispatcher isn't started yet.
        dispatcher.getEventHandler().handle(
            new SpeculatorEvent(job.getID(), clock.getTime()));
        LOG.info("MRAppMaster launching normal, non-uberized, multi-container "
            + "job " + job.getID() + ".");
      }
      // Start ClientService here, since it's not initialized if
      // errorHappenedShutDown is true
      clientService.start();
    }
    //start all the components
    super.serviceStart();

    // set job classloader if configured
    MRApps.setJobClassLoader(getConfig());

    if (initFailed) {
      JobEvent initFailedEvent = new JobEvent(job.getID(), JobEventType.JOB_INIT_FAILED);
      jobEventDispatcher.handle(initFailedEvent);
    } else {
      // All components have started, start the job.
      startJobs();
    }
View Full Code Here
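Because the JOB_INIT call is synchronous, serviceStart() can inspect the job's internal state right afterwards and branch: a failed init is reported with JOB_INIT_FAILED once all services are up, otherwise the job is started. The branch boils down to something like the following; the helper name is illustrative:

import org.apache.hadoop.mapreduce.v2.api.records.JobId;
import org.apache.hadoop.mapreduce.v2.app.job.event.JobEvent;
import org.apache.hadoop.mapreduce.v2.app.job.event.JobEventType;
import org.apache.hadoop.mapreduce.v2.app.job.event.JobStartEvent;
import org.apache.hadoop.yarn.event.EventHandler;

public class PostInitSketch {
  /** After all services are started: report a failed init, or kick the job off. */
  public static void finishStartup(EventHandler<JobEvent> jobEventDispatcher,
      JobId jobId, boolean initFailed) {
    if (initFailed) {
      // Init failed earlier; now that failure events can be processed, report it.
      jobEventDispatcher.handle(new JobEvent(jobId, JobEventType.JOB_INIT_FAILED));
    } else {
      // Everything is up: send the job-start event (this triggers execution).
      jobEventDispatcher.handle(new JobStartEvent(jobId));
    }
  }
}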
