Package org.apache.ambari.server

Usage examples of org.apache.ambari.server.AmbariException, collected from the Apache Ambari server codebase.


  /**
   * Schedule the batch requests of the given request execution, using either a
   * cron trigger built from its schedule or a simple trigger that fires
   * immediately when no schedule is provided.
   * @throws AmbariException if the scheduler is unavailable or scheduling fails
   */
  public void scheduleBatch(RequestExecution requestExecution)
    throws AmbariException {

    if (!isSchedulerAvailable()) {
      throw new AmbariException("Scheduler unavailable.");
    }

    // If the scheduler is not running, start it before scheduling any jobs
    try {
      if (!executionScheduler.isSchedulerStarted()) {
        executionScheduler.startScheduler(null);
      }
    } catch (SchedulerException e) {
      LOG.error("Unable to determine scheduler state.", e);
      throw new AmbariException("Scheduler unavailable.");
    }

    // Create and persist jobs based on batches
    JobDetail firstJobDetail = persistBatch(requestExecution);

    if (firstJobDetail == null) {
      throw new AmbariException("Unable to schedule jobs. firstJobDetail = "
        + firstJobDetail);
    }

    // Create a cron trigger for the first batch job.
    // If no schedule is specified, create a simple trigger that fires right away.
    Schedule schedule = requestExecution.getSchedule();

    if (schedule != null) {
      String triggerExpression = schedule.getScheduleExpression();

      Date startDate = null;
      Date endDate = null;
      try {
        String startTime = schedule.getStartTime();
        String endTime = schedule.getEndTime();
        startDate = startTime != null && !startTime.isEmpty() ?
          DateUtils.convertToDate(startTime) : new Date();
        endDate = endTime != null && !endTime.isEmpty() ?
          DateUtils.convertToDate(endTime) : null;
      } catch (ParseException e) {
        LOG.error("Unable to parse startTime / endTime.", e);
      }

      Trigger trigger = newTrigger()
          .withIdentity(REQUEST_EXECUTION_TRIGGER_PREFIX + "-" +
            requestExecution.getId(), ExecutionJob.LINEAR_EXECUTION_TRIGGER_GROUP)
          .withSchedule(cronSchedule(triggerExpression)
            .withMisfireHandlingInstructionFireAndProceed())
          .forJob(firstJobDetail)
          .startAt(startDate)
          .endAt(endDate)
          .build();

      try {
        executionScheduler.scheduleJob(trigger);
        LOG.debug("Scheduled trigger next fire time: " + trigger.getNextFireTime());
      } catch (SchedulerException e) {
        LOG.error("Unable to schedule request execution.", e);
        throw new AmbariException(e.getMessage());
      }

    } else {
      // Create trigger for immediate job execution
      Trigger trigger = newTrigger()
        .forJob(firstJobDetail)
        .withIdentity(REQUEST_EXECUTION_TRIGGER_PREFIX + "-" +
          requestExecution.getId(), ExecutionJob.LINEAR_EXECUTION_TRIGGER_GROUP)
        .withSchedule(simpleSchedule().withMisfireHandlingInstructionFireNow())
        .startNow()
        .build();

      try {
        executionScheduler.scheduleJob(trigger);
        LOG.debug("Scheduled trigger next fire time: " + trigger.getNextFireTime());
      } catch (SchedulerException e) {
        LOG.error("Unable to schedule request execution.", e);
        throw new AmbariException(e.getMessage());
      }
    }
  }
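
The method above converts every Quartz SchedulerException into an AmbariException so that callers only deal with Ambari's own checked exception. Below is a minimal standalone sketch of that wrapping pattern, assuming a Quartz Scheduler supplied by the caller; the class and method names are illustrative and not part of Ambari.

import org.apache.ambari.server.AmbariException;
import org.quartz.Scheduler;
import org.quartz.SchedulerException;
import org.quartz.Trigger;

public final class SchedulerWrapper {
  /**
   * Register a trigger with Quartz, translating scheduler failures into
   * AmbariException for Ambari callers.
   */
  public static void scheduleOrFail(Scheduler scheduler, Trigger trigger)
      throws AmbariException {
    try {
      scheduler.scheduleJob(trigger);
    } catch (SchedulerException e) {
      // Passing the SchedulerException as the cause keeps its stack trace,
      // whereas the snippet above forwards only the message.
      throw new AmbariException("Unable to schedule request execution.", e);
    }
  }
}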


    if (!schedule.isEmpty()) {
      if (schedule.getStartTime() != null && !schedule.getStartTime().isEmpty()) {
        try {
          startDate = DateUtils.convertToDate(schedule.getStartTime());
        } catch (ParseException pe) {
          throw new AmbariException("Start time in invalid format. startTime "
            + "= " + schedule.getStartTime() + ", Allowed format = "
            + DateUtils.ALLOWED_DATE_FORMAT);
        }
      }
      if (schedule.getEndTime() != null && !schedule.getEndTime().isEmpty()) {
        try {
          endDate = DateUtils.convertToDate(schedule.getEndTime());
        } catch (ParseException pe) {
          throw new AmbariException("End time in invalid format. endTime "
            + "= " + schedule.getEndTime() + ", Allowed format = "
            + DateUtils.ALLOWED_DATE_FORMAT);
        }
      }
      if (endDate != null) {
        if (endDate.before(new Date())) {
          throw new AmbariException("End date should be in the future. " +
            "endDate = " + endDate);
        }
        if (startDate != null && endDate.before(startDate)) {
          throw new AmbariException("End date cannot be before start date. " +
            "startDate = " + startDate + ", endDate = " + endDate);
        }
      }
      String cronExpression = schedule.getScheduleExpression();
      if (cronExpression != null && !cronExpression.trim().isEmpty()) {
        if (!CronExpression.isValidExpression(cronExpression)) {
          throw new AmbariException("Invalid non-empty cron expression " +
            "provided. " + cronExpression);
        }
      }
    }
  }
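
The validation above rejects malformed start/end times, end dates that lie in the past or precede the start date, and invalid cron expressions before anything is scheduled. The sketch below isolates the cron check using Quartz's CronExpression.isValidExpression; the helper class name and error wording are illustrative.

import org.apache.ambari.server.AmbariException;
import org.quartz.CronExpression;

public final class CronValidation {
  /** Reject a non-empty cron expression that Quartz cannot parse. */
  public static void requireValidCron(String cronExpression) throws AmbariException {
    if (cronExpression != null && !cronExpression.trim().isEmpty()
        && !CronExpression.isValidExpression(cronExpression)) {
      throw new AmbariException("Invalid non-empty cron expression provided. "
          + cronExpression);
    }
  }

  public static void main(String[] args) {
    try {
      requireValidCron("0 0/30 * * * ?"); // every 30 minutes: accepted
      requireValidCron("not-a-cron");     // rejected below
    } catch (AmbariException e) {
      System.out.println("Rejected: " + e.getMessage());
    }
  }
}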

  /**
   * Delete all jobs and triggers if possible.
   * @throws AmbariException
   */
  public void deleteAllJobs(RequestExecution requestExecution) throws AmbariException {
    if (!isSchedulerAvailable()) {
      throw new AmbariException("Scheduler unavailable.");
    }

    // Delete all jobs for this request execution
    Batch batch = requestExecution.getBatch();
    if (batch != null) {
      List<BatchRequest> batchRequests = batch.getBatchRequests();
      if (batchRequests != null) {
        for (BatchRequest batchRequest : batchRequests) {
          String jobName = getJobName(requestExecution.getId(),
            batchRequest.getOrderId());

          LOG.debug("Deleting Job, jobName = " + jobName);

          try {
            executionScheduler.deleteJob(JobKey.jobKey(jobName,
              ExecutionJob.LINEAR_EXECUTION_JOB_GROUP));
          } catch (SchedulerException e) {
            LOG.warn("Unable to delete job, " + jobName, e);
            throw new AmbariException(e.getMessage());
          }
        }
      }
    }
  }
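
deleteAllJobs above removes one Quartz job per batch request and re-throws any SchedulerException as an AmbariException. A reduced sketch for a single job follows; the group name is a hard-coded placeholder standing in for ExecutionJob.LINEAR_EXECUTION_JOB_GROUP.

import org.apache.ambari.server.AmbariException;
import org.quartz.JobKey;
import org.quartz.Scheduler;
import org.quartz.SchedulerException;

public final class JobCleanup {
  /** Delete a single Quartz job, surfacing failures as AmbariException. */
  public static void deleteJobOrFail(Scheduler scheduler, String jobName)
      throws AmbariException {
    JobKey jobKey = JobKey.jobKey(jobName, "linear-execution-jobs"); // placeholder group
    try {
      scheduler.deleteJob(jobKey);
    } catch (SchedulerException e) {
      throw new AmbariException("Unable to delete job " + jobName, e);
    }
  }
}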

        actionDBAccessor.setSourceScheduleForRequest(batchRequestResponse.getRequestId(), executionId);
      }

      return batchRequestResponse.getRequestId();
    } catch (Exception e) {
      throw new AmbariException("Exception occurred while performing request", e);
    }

  }
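
This fragment uses the message-plus-cause constructor, which keeps the original exception attached to the AmbariException. A self-contained illustration follows; the IOException is an arbitrary stand-in for whatever fails inside the try block.

import java.io.IOException;
import org.apache.ambari.server.AmbariException;

public final class WrapExample {
  public static void performRequest() throws AmbariException {
    try {
      // Placeholder for the real work (submitting the batch request).
      throw new IOException("connection reset");
    } catch (Exception e) {
      // The cause is preserved, so callers can still see the original stack trace.
      throw new AmbariException("Exception occurred while performing request", e);
    }
  }
}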

    Cluster cluster = clusters.getCluster(clusterName);
    RequestExecution requestExecution = cluster.getAllRequestExecutions().get(executionId);

    if (requestExecution == null) {
      throw new AmbariException("Unable to find request schedule with id = "
        + executionId);
    }

    requestExecution.updateBatchRequest(batchId, batchRequestResponse, statusOnly);
  }
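
The lookup above throws an AmbariException when no request schedule matches the given id. The same look-up-or-throw pattern is sketched generically below so it stands alone; the helper name is illustrative.

import java.util.Map;
import org.apache.ambari.server.AmbariException;

public final class Lookups {
  /** Return the value for the given id, or fail with a descriptive AmbariException. */
  public static <T> T findOrThrow(Map<Long, T> byId, long executionId)
      throws AmbariException {
    T value = byId.get(executionId);
    if (value == null) {
      throw new AmbariException("Unable to find request schedule with id = "
          + executionId);
    }
    return value;
  }
}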

    Cluster cluster = clusters.getCluster(clusterName);
    RequestExecution requestExecution = cluster.getAllRequestExecutions().get(executionId);

    if (requestExecution == null) {
      throw new AmbariException("Unable to find request schedule with id = "
        + executionId);
    }

    BatchSettings batchSettings = requestExecution.getBatch().getBatchSettings();
    if (batchSettings != null

    Cluster cluster = clusters.getCluster(clusterName);
    RequestExecution requestExecution = cluster.getAllRequestExecutions().get(executionId);

    if (requestExecution == null) {
      throw new AmbariException("Unable to find request schedule with id = "
        + executionId);
    }

    Batch batch = requestExecution.getBatch();
    BatchRequest firstBatchRequest = null;

    if (batch != null) {
      List<BatchRequest> batchRequests = batch.getBatchRequests();
      if (batchRequests != null && batchRequests.size() > 0) {
        Collections.sort(batchRequests);
        firstBatchRequest = batchRequests.get(0);
      }
    }

    boolean markCompleted = false;

    if (firstBatchRequest != null) {
      String jobName = getJobName(executionId, firstBatchRequest.getOrderId());
      JobKey jobKey = JobKey.jobKey(jobName, ExecutionJob.LINEAR_EXECUTION_JOB_GROUP);
      JobDetail jobDetail;
      try {
        jobDetail = executionScheduler.getJobDetail(jobKey);
      } catch (SchedulerException e) {
        LOG.warn("Unable to retrieve job details from scheduler. job: " + jobKey);
        e.printStackTrace();
        return;
      }

      if (jobDetail != null) {
        try {
          List<? extends Trigger> triggers = executionScheduler.getTriggersForJob(jobKey);
          if (triggers != null && triggers.size() > 0) {
            if (triggers.size() > 1) {
              throw new AmbariException("Too many triggers defined for job. " +
                "job: " + jobKey);
            }

            Trigger trigger = triggers.get(0);
            // Note: If next fire time is in the past, it could be a misfire
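
This fragment enforces that the first batch job has exactly one trigger and treats anything else as an error. The invariant is sketched below, with the Quartz call wrapped into AmbariException in the same style as the rest of this page; the helper name is illustrative.

import java.util.List;
import org.apache.ambari.server.AmbariException;
import org.quartz.JobKey;
import org.quartz.Scheduler;
import org.quartz.SchedulerException;
import org.quartz.Trigger;

public final class TriggerCheck {
  /** Return the job's single trigger, null if none, or fail if there are several. */
  public static Trigger soleTrigger(Scheduler scheduler, JobKey jobKey)
      throws AmbariException {
    List<? extends Trigger> triggers;
    try {
      triggers = scheduler.getTriggersForJob(jobKey);
    } catch (SchedulerException e) {
      throw new AmbariException("Unable to read triggers for job " + jobKey, e);
    }
    if (triggers == null || triggers.isEmpty()) {
      return null;
    }
    if (triggers.size() > 1) {
      throw new AmbariException("Too many triggers defined for job. job: " + jobKey);
    }
    return triggers.get(0);
  }
}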

  // Get services from the given request.
  private synchronized Set<ServiceResponse> getServices(ServiceRequest request)
      throws AmbariException {
    if (request.getClusterName() == null
        || request.getClusterName().isEmpty()) {
      throw new AmbariException("Invalid arguments, cluster name"
          + " cannot be null");
    }
    Clusters clusters    = getManagementController().getClusters();
    String   clusterName = request.getClusterName();
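
getServices rejects requests that carry no cluster name before touching any cluster state. A minimal argument-validation helper in the same spirit, with an illustrative name and message:

import org.apache.ambari.server.AmbariException;

public final class RequestPreconditions {
  /** Ensure a cluster name was supplied before the request is processed. */
  public static String requireClusterName(String clusterName) throws AmbariException {
    if (clusterName == null || clusterName.isEmpty()) {
      throw new AmbariException("Invalid arguments, cluster name"
          + " cannot be null or empty");
    }
    return clusterName;
  }
}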

      seenNewStates.add(newState);

      if (newState != oldState) {
        if (!State.isValidDesiredStateTransition(oldState, newState)) {
          throw new AmbariException("Invalid transition for"
              + " service"
              + ", clusterName=" + cluster.getClusterName()
              + ", clusterId=" + cluster.getClusterId()
              + ", serviceName=" + s.getName()
              + ", currentDesiredState=" + oldState
              + ", newDesiredState=" + newState);

        }
        if (!changedServices.containsKey(newState)) {
          changedServices.put(newState, new ArrayList<Service>());
        }
        changedServices.get(newState).add(s);
      }

      // TODO should we check whether all servicecomponents and
      // servicecomponenthosts are in the required desired state?

      for (ServiceComponent sc : s.getServiceComponents().values()) {
        State oldScState = sc.getDesiredState();
        if (newState != oldScState) {
          if (sc.isClientComponent() &&
              !newState.isValidClientComponentState()) {
            continue;
          }
          if (!State.isValidDesiredStateTransition(oldScState, newState)) {
            throw new AmbariException("Invalid transition for"
                + " servicecomponent"
                + ", clusterName=" + cluster.getClusterName()
                + ", clusterId=" + cluster.getClusterId()
                + ", serviceName=" + sc.getServiceName()
                + ", componentName=" + sc.getName()
                + ", currentDesiredState=" + oldScState
                + ", newDesiredState=" + newState);
          }
          if (!changedComps.containsKey(newState)) {
            changedComps.put(newState, new ArrayList<ServiceComponent>());
          }
          changedComps.get(newState).add(sc);
        }
        if (LOG.isDebugEnabled()) {
          LOG.debug("Handling update to ServiceComponent"
              + ", clusterName=" + request.getClusterName()
              + ", serviceName=" + s.getName()
              + ", componentName=" + sc.getName()
              + ", currentDesiredState=" + oldScState
              + ", newDesiredState=" + newState);
        }

        for (ServiceComponentHost sch : sc.getServiceComponentHosts().values()) {
          State oldSchState = sch.getState();
          if (oldSchState == State.DISABLED || oldSchState == State.UNKNOWN) {
            // Ignore host component updates in this state
            if (LOG.isDebugEnabled()) {
              LOG.debug("Ignoring ServiceComponentHost"
                  + ", clusterName=" + request.getClusterName()
                  + ", serviceName=" + s.getName()
                  + ", componentName=" + sc.getName()
                  + ", hostname=" + sch.getHostName()
                  + ", currentState=" + oldSchState
                  + ", newDesiredState=" + newState);
            }
            continue;
          }
         
          if (newState == oldSchState) {
            ignoredScHosts.add(sch);
            if (LOG.isDebugEnabled()) {
              LOG.debug("Ignoring ServiceComponentHost"
                  + ", clusterName=" + request.getClusterName()
                  + ", serviceName=" + s.getName()
                  + ", componentName=" + sc.getName()
                  + ", hostname=" + sch.getHostName()
                  + ", currentState=" + oldSchState
                  + ", newDesiredState=" + newState);
            }
            continue;
          }
         
          MaintenanceState schMaint = controller.getEffectiveMaintenanceState(sch);
          if (MaintenanceState.ON == schMaint ||
              (requests.size() > 1 && MaintenanceState.OFF != schMaint)) {
            ignoredScHosts.add(sch);
            if (LOG.isDebugEnabled()) {
              LOG.debug("Ignoring " + schMaint + " ServiceComponentHost"
                  + ", clusterName=" + request.getClusterName()
                  + ", serviceName=" + s.getName()
                  + ", componentName=" + sc.getName()
                  + ", hostname=" + sch.getHostName());
            }
            continue;
          }
          Host host = clusters.getHost(sch.getHostName());

          if (schMaint == MaintenanceState.IMPLIED_FROM_HOST
             && host != null
             && host.getMaintenanceState(cluster.getClusterId()) != MaintenanceState.OFF) {

            // Host is in Passive mode, ignore the SCH
            ignoredScHosts.add(sch);
            LOG.info("Ignoring ServiceComponentHost since "
              + "the host is in passive mode"
              + ", clusterName=" + request.getClusterName()
              + ", serviceName=" + s.getName()
              + ", componentName=" + sc.getName()
              + ", hostname=" + sch.getHostName());
            continue;
          }
         
         
          if (sc.isClientComponent() &&
              !newState.isValidClientComponentState()) {
            continue;
          }
          /**
           * This is a hack for now wherein we don't fail if the
           * sch is in INSTALL_FAILED
           */
          if (! isValidStateTransition(requestStages, oldSchState, newState, sch)) {
            String error = "Invalid transition for"
                + " servicecomponenthost"
                + ", clusterName=" + cluster.getClusterName()
                + ", clusterId=" + cluster.getClusterId()
                + ", serviceName=" + sch.getServiceName()
                + ", componentName=" + sch.getServiceComponentName()
                + ", hostname=" + sch.getHostName()
                + ", currentState=" + oldSchState
                + ", newDesiredState=" + newState;
            StackId sid = cluster.getDesiredStackVersion();

            if ( ambariMetaInfo.getComponentCategory(
                sid.getStackName(), sid.getStackVersion(), sc.getServiceName(),
                sch.getServiceComponentName()).isMaster()) {
              throw new AmbariException(error);
            } else {
              LOG.warn("Ignoring: " + error);
              continue;
            }
          }
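
The block above only fails hard when an invalid transition concerns a master component; for other components the error is logged and the host component is skipped. That policy is condensed below, with the AmbariMetaInfo category lookup replaced by a boolean parameter.

import org.apache.ambari.server.AmbariException;

public final class TransitionPolicy {
  /**
   * Invalid desired-state transitions are fatal for master components and
   * merely logged for workers and clients.
   */
  public static void rejectOrSkip(boolean isMasterComponent, String error)
      throws AmbariException {
    if (isMasterComponent) {
      throw new AmbariException(error);
    }
    // Non-master component: log and let the caller move on to the next host.
    System.out.println("Ignoring: " + error);
  }
}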

    Set<Service> removable = new HashSet<Service>();
   
    for (ServiceRequest serviceRequest : request) {
      if (StringUtils.isEmpty(serviceRequest.getClusterName()) || StringUtils.isEmpty(serviceRequest.getServiceName())) {
        // FIXME throw correct error
        throw new AmbariException("Invalid arguments: cluster name and service"
            + " name cannot be null or empty");
      } else {
       
        Service service = clusters.getCluster(
            serviceRequest.getClusterName()).getService(
                serviceRequest.getServiceName());
       
        if (!service.getDesiredState().isRemovableState()) {
          throw new AmbariException("Cannot remove " + service.getName() + ". Desired state " +
              service.getDesiredState() + " is not removable.  Service must be stopped or disabled.");
        } else {
          for (ServiceComponent sc : service.getServiceComponents().values()) {
            if (!sc.canBeRemoved()) {
              throw new AmbariException("Cannot remove " +
                  serviceRequest.getClusterName() + "/" + serviceRequest.getServiceName() +
                  ". " + sc.getName() + " is in a non-removable state.");
            }
          }
        }
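
Finally, the removal checks above refuse to delete a service whose desired state is not removable or whose components cannot be removed. A stripped-down sketch, with the cluster and service lookups replaced by plain parameters (isRemovable stands in for service.getDesiredState().isRemovableState()):

import org.apache.ambari.server.AmbariException;

public final class RemovalChecks {
  /** Refuse removal unless the service's desired state allows it. */
  public static void requireRemovable(String serviceName, boolean isRemovable)
      throws AmbariException {
    if (!isRemovable) {
      throw new AmbariException("Cannot remove " + serviceName
          + ". Service must be stopped or disabled first.");
    }
  }
}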
