Package com.vmware.bdd.apitypes

Examples of com.vmware.bdd.apitypes.TaskRead
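
The snippets below only exercise part of the class. The following is a minimal sketch of that surface, reconstructed from the getters, setters and enum constants used in the examples rather than from the real source, so the exact field layout is an assumption:

// Hedged sketch of TaskRead as the examples below use it; field names and the
// nested enums are inferred from the calls that appear in the snippets, not
// copied from the actual com.vmware.bdd.apitypes.TaskRead source.
public class TaskRead {
   public enum Status {
      ABANDONED, COMPLETED, FAILED, STARTED, STARTING, STOPPED, STOPPING, UNKNOWN
   }

   public enum Type { VHM, DELETE, INNER }

   private long id;
   private Type type;
   private Status status;
   private double progress;       // 0.0 - 1.0; the CLI multiplies by 100 for display
   private String target;         // cluster name the task operates on
   private String progressMessage;
   private String errorMessage;
   private String workDir;
   // plus succeedNodes/failNodes lists, populated via convert(...) when sub-jobs
   // are enabled; their element type is not visible in the snippets

   // plain getters and setters for each field, e.g. getStatus()/setStatus(Status),
   // getProgress()/setProgress(double), getType()/setType(Type), ...
}

Status drives every polling loop below; Type distinguishes VHM (elasticity) and DELETE tasks from plain INNER tasks.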


         JobParameters jobParameters = new JobParameters(param);
         try {
            long jobExecutionId =
                  jobManager.runJob(JobConstants.QUERY_CLUSTER_JOB_NAME,
                        jobParameters);
            TaskRead status = jobManager.getJobExecutionStatus(jobExecutionId);
            while (status.getStatus() != TaskRead.Status.COMPLETED
                  && status.getStatus() != TaskRead.Status.FAILED
                  && status.getStatus() != TaskRead.Status.ABANDONED
                  && status.getStatus() != TaskRead.Status.STOPPED) {
               Thread.sleep(1000);
               status = jobManager.getJobExecutionStatus(jobExecutionId);
            }
         } catch (Exception ex) {
            logger.error("failed to run query cluster job: " + clusterName, ex);
         }
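
The terminal-status check above (COMPLETED, FAILED, ABANDONED or STOPPED) recurs in several of the examples below. A small helper along these lines keeps those loops shorter; it is a sketch only, reusing the jobManager.getJobExecutionStatus(long) call from the snippet above:

   // Sketch: poll a job's TaskRead until it reaches a terminal status.
   // Requires java.util.EnumSet; jobManager is assumed to be the same bean as above.
   private static final EnumSet<TaskRead.Status> TERMINAL_STATUSES = EnumSet.of(
         TaskRead.Status.COMPLETED, TaskRead.Status.FAILED,
         TaskRead.Status.ABANDONED, TaskRead.Status.STOPPED);

   private TaskRead pollUntilTerminal(long jobExecutionId, long pollIntervalMs)
         throws InterruptedException {
      TaskRead status = jobManager.getJobExecutionStatus(jobExecutionId);
      while (!TERMINAL_STATUSES.contains(status.getStatus())) {
         Thread.sleep(pollIntervalMs);
         status = jobManager.getJobExecutionStatus(jobExecutionId);
      }
      return status;
   }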


   @RequestMapping(value = "/task/{taskId}", method = RequestMethod.GET, produces = "application/json")
   @ResponseBody
   public TaskRead getTaskById(@PathVariable long taskId) throws Exception {

      // TODO add exception handling
      TaskRead task = jobManager.getJobExecutionStatus(taskId);
      if (task.getStatus() == TaskRead.Status.COMPLETED) {
         task.setProgress(1.0);
      }
      if (task.getType() == null) {
         task.setType(Type.INNER); // XXX keep the existing interface for now
      }
      return task;
   }

   // (signature restored from the callers shown in the other snippets)
   public TaskRead getJobExecutionStatus(long jobExecutionId) {
      JobExecution jobExecution = jobExplorer.getJobExecution(jobExecutionId);
      if (jobExecution == null) {
         throw BddException.NOT_FOUND("Task", Long.toString(jobExecutionId));
      }

      TaskRead jobStatus = new TaskRead();
      jobStatus.setId(jobExecutionId);

      //identify VHM jobs
      String jobName = jobExecution.getJobInstance().getJobName();
      if (jobName.equals(JobConstants.SET_MANUAL_ELASTICITY_JOB_NAME)) {
         jobStatus.setType(Type.VHM);
      } else if (jobName.equals(JobConstants.DELETE_CLUSTER_JOB_NAME)) {
         jobStatus.setType(Type.DELETE);
      }

      JobParameters jobParameters =
            jobExecution.getJobInstance().getJobParameters();
      String clusterName =
            jobParameters.getString(JobConstants.CLUSTER_NAME_JOB_PARAM);
      jobStatus.setTarget(clusterName);
      long subJobEnabled = jobParameters.getLong(JobConstants.SUB_JOB_ENABLED);
      if (subJobEnabled != 1) {
         jobStatus.setProgress(jobExecutionStatusHolder
               .getCurrentProgress(jobExecutionId));
      } else {
         jobStatus.setProgress(mainJobExecutionStatusHolder
               .getCurrentProgress(jobExecutionId));
      }
      Status status = null;
      switch (jobExecution.getStatus()) {
      case ABANDONED:
         status = Status.ABANDONED;
         break;
      case COMPLETED:
         status = Status.COMPLETED;
         break;
      case FAILED:
         status = Status.FAILED;
         break;
      case STARTED:
         status = Status.STARTED;
         break;
      case STARTING:
         status = Status.STARTING;
         break;
      case STOPPED:
         status = Status.STOPPED;
         break;
      case STOPPING:
         status = Status.STOPPING;
         break;
      case UNKNOWN:
      default:
         status = Status.UNKNOWN;
      }
      jobStatus.setStatus(status);
      if (subJobEnabled == 1) {
         List<NodeOperationStatus> succeedNodes =
               (ArrayList<NodeOperationStatus>) jobExecution.getExecutionContext()
                     .get(JobConstants.SUB_JOB_NODES_SUCCEED);
         List<NodeOperationStatus> failNodes =
               (ArrayList<NodeOperationStatus>) jobExecution.getExecutionContext()
                     .get(JobConstants.SUB_JOB_NODES_FAIL);
         if (succeedNodes != null) {
            jobStatus.setSucceedNodes(convert(succeedNodes));
         }
         if (failNodes != null) {
            jobStatus.setFailNodes(convert(failNodes));
         }
      }
      if (status.equals(Status.FAILED) && subJobEnabled != 1) {
         String workDir =
               TrackableTasklet.getFromJobExecutionContext(
                     jobExecution.getExecutionContext(),
                     JobConstants.CURRENT_COMMAND_WORK_DIR, String.class);
         String errorMessage =
               TrackableTasklet.getFromJobExecutionContext(
                     jobExecution.getExecutionContext(),
                     JobConstants.CURRENT_ERROR_MESSAGE, String.class);
         jobStatus.setErrorMessage(errorMessage);
         jobStatus.setWorkDir(workDir);
         logger.error("mark task as failed: " + errorMessage);
      }

      return jobStatus;
   }

      List<TaskRead> taskReads = new ArrayList<TaskRead>(taskIds.size());

      for (Long id : taskIds) {
         if (id == null)
            continue;
         TaskRead task = getJobExecutionStatus(id);
         if (task.getType() == null) {
            task.setType(Type.INNER);
         }
         if (task.getStatus() == TaskRead.Status.COMPLETED) {
            task.setProgress(1.0);
         }
         taskReads.add(task);
      }

      return taskReads;

   /**
    * Block until the given job execution reaches a terminal status, or throw
    * TimeoutException once timeoutMs has elapsed.
    */
   public TaskRead waitJobExecution(long jobExecutionId, long timeoutMs)
         throws TimeoutException {
      long start = System.currentTimeMillis();
      while (true) {
         TaskRead tr = getJobExecutionStatus(jobExecutionId);
         Status status = tr.getStatus();
         if (Status.ABANDONED.equals(status) || Status.COMPLETED.equals(status)
               || Status.FAILED.equals(status) || Status.STOPPED.equals(status)) {
            return tr;
         }
         if (System.currentTimeMillis() - start >= timeoutMs) {
            throw new TimeoutException("job execution " + jobExecutionId
                  + " did not finish within " + timeoutMs + " ms");
         }
         try {
            Thread.sleep(1000); // poll interval assumed; not shown in the snippet
         } catch (InterruptedException ex) {
            // ignore and keep polling
         }
      }
   }
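
A usage sketch for the method above, assuming waitJobExecution is exposed on the same jobManager bean; the ten-minute timeout and the log messages are illustrative only:

      try {
         // block until the job reaches COMPLETED, FAILED, ABANDONED or STOPPED
         TaskRead result = jobManager.waitJobExecution(jobExecutionId, 10 * 60 * 1000L);
         if (TaskRead.Status.COMPLETED.equals(result.getStatus())) {
            logger.info("job " + jobExecutionId + " completed");
         } else {
            logger.error("job " + jobExecutionId + " ended as " + result.getStatus()
                  + ": " + result.getErrorMessage());
         }
      } catch (TimeoutException ex) {
         logger.error("job " + jobExecutionId + " did not finish in time", ex);
      }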

         HttpHeaders headers = response.getHeaders();
         URI taskURI = headers.getLocation();
         String[] taskURIs = taskURI.toString().split("/");
         String taskId = taskURIs[taskURIs.length - 1];

         TaskRead taskRead;
         int oldProgress = 0;
         Status oldTaskStatus = null;
         Status taskStatus = null;
         int progress = 0;
         do {
            ResponseEntity<TaskRead> taskResponse =
                  restGetById(Constants.REST_PATH_TASK, taskId, TaskRead.class,
                        false);

            //the task query itself does not throw; failures surface through the task status
            taskRead = taskResponse.getBody();

            progress = (int) (taskRead.getProgress() * 100);
            taskStatus = taskRead.getStatus();

            //fix cluster deletion exception
            Type taskType = taskRead.getType();
            if ((taskType == Type.DELETE) && (taskStatus == TaskRead.Status.COMPLETED)) {
               clearScreen();
               System.out.println(taskStatus + " " + progress + "%\n");
               break;
            }

            if ((prettyOutput != null && prettyOutput.length > 0 && (taskRead.getType() == Type.VHM ? prettyOutput[0]
                  .isRefresh(true) : prettyOutput[0].isRefresh(false)))
                  || oldTaskStatus != taskStatus
                  || oldProgress != progress) {
               //clear screen and show progress every few seconds
               clearScreen();
               //output the completed task summary first, in case there are several related tasks
               if (prettyOutput != null && prettyOutput.length > 0
                     && prettyOutput[0].getCompletedTaskSummary() != null) {
                  for (String summary : prettyOutput[0]
                        .getCompletedTaskSummary()) {
                     System.out.println(summary + "\n");
                  }
               }
               System.out.println(taskStatus + " " + progress + "%\n");

               if (prettyOutput != null && prettyOutput.length > 0) {
                  // the pretty-output callback customizes the detailed output case by case
                  prettyOutput[0].prettyOutput();
               }

               if (oldTaskStatus != taskStatus || oldProgress != progress) {
                  oldTaskStatus = taskStatus;
                  oldProgress = progress;
                  if (taskRead.getProgressMessage() != null) {
                     System.out.println(taskRead.getProgressMessage());
                  }
               }
            }
            try {
               Thread.sleep(3 * 1000);
            } catch (InterruptedException ex) {
               //ignore
            }
         } while (taskStatus != TaskRead.Status.COMPLETED
               && taskStatus != TaskRead.Status.FAILED
               && taskStatus != TaskRead.Status.ABANDONED
               && taskStatus != TaskRead.Status.STOPPED);

         String errorMsg = taskRead.getErrorMessage();
         if (!taskRead.getStatus().equals(TaskRead.Status.COMPLETED)) {
            throw new CliRestException(errorMsg);
         } else { //completed
            if (taskRead.getType().equals(Type.VHM)) {
               logger.info("task type is vhm");
               Thread.sleep(5*1000);
               if (prettyOutput != null && prettyOutput.length > 0
                     && prettyOutput[0].isRefresh(true)) {
                  //clear screen and show progress every few seconds

                     name, Constants.OUTPUT_OP_RESIZE,
                     Constants.OUTPUT_OP_RESULT_FAIL, "node group " + nodeGroup
                           + " does not exist.");
               return;
            }
            TaskRead taskRead = null;
            if (instanceNum > 0) {
               restClient.resize(name, nodeGroup, instanceNum);
            } else if (cpuNumber > 0 || memory > 0) {
               if (!cluster.getStatus().isActiveServiceStatus()) {
                  CommandsUtils.printCmdFailure(

         @CliOption(key = { "name" }, mandatory = true, help = "The cluster name") final String clusterName,
         @CliOption(key = { "disk" }, mandatory = false, unspecifiedDefaultValue = "false", specifiedDefaultValue = "true", help = "Recover a disk failure") final boolean isDiskFailure,
         @CliOption(key = { "parallel" }, mandatory = false, unspecifiedDefaultValue = "false", specifiedDefaultValue = "true", help = "Whether use parallel way to recovery node or not") final boolean parallel,
         @CliOption(key = { "nodeGroup" }, mandatory = false, help = "The node group name which failure belong to") final String nodeGroupName) {
      try {
         TaskRead taskRead = null;
         if (!isDiskFailure) {
            CommandsUtils.printCmdFailure(Constants.OUTPUT_OBJECT_CLUSTER,
                  clusterName, Constants.OUTPUT_OP_FIX,
                  Constants.OUTPUT_OP_RESULT_FAIL,
                  Constants.PARAM_SHOULD_SPECIFY_DISK);

   private void stopVmAfterStarted(final String rpPath, final String vmName,
         long jobExecutionId) throws InterruptedException, Exception {
      int retry = 0;
      while (retry <= 0) {
         Thread.sleep(50);
         TaskRead tr = jobManager.getJobExecutionStatus(jobExecutionId);
         if (TaskRead.Status.COMPLETED.equals(tr.getStatus())) {
            logger.info("===========COMPLETED============");
            break;
         }
         if (TaskRead.Status.FAILED.equals(tr.getStatus())
               || TaskRead.Status.STOPPED.equals(tr.getStatus())) {
            logger.info("===========FAILED============");
            break;
         }
         boolean stopped = stopVcVm(rpPath, vmName);
         if (stopped) {

   private void waitTaskFinished(long jobExecutionId)
         throws InterruptedException, Exception {
      while (true) {
         Thread.sleep(50);
         TaskRead tr = jobManager.getJobExecutionStatus(jobExecutionId);
         if (TaskRead.Status.COMPLETED.equals(tr.getStatus())) {
            logger.info("===========COMPLETED============");
            break;
         }
         if (TaskRead.Status.FAILED.equals(tr.getStatus())
               || TaskRead.Status.STOPPED.equals(tr.getStatus())) {
            break;
         }
      }
   }
