Package com.vmware.bdd.apitypes

Examples of com.vmware.bdd.apitypes.ClusterCreate
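
The snippets below come from the VMware Serengeti / Big Data Extensions code base and show ClusterCreate being built, sanitized, and validated. As a minimal orientation, a hypothetical caller could populate a spec and run the same validation helpers that appear later on this page; the setters and helpers used here (setName, setDistroVendor, setDistroVersion, setPassword, verifyClusterNameLength, validateNodeGroupNames) are all visible in the snippets, while the concrete values and the no-arg constructor are illustrative assumptions.

   import com.vmware.bdd.apitypes.ClusterCreate;

   public class ClusterCreateSketch {
      public static void main(String[] args) {
         // Illustrative values only; the calls mirror the API usage shown below.
         ClusterCreate spec = new ClusterCreate();
         spec.setName("demo-cluster");       // hypothetical cluster name
         spec.setDistroVendor("CDH");        // cf. createSpec.setDistroVendor(stack.getVendor())
         spec.setDistroVersion("5.3.0");     // cf. createSpec.setDistroVersion(stack.getFullVersion())
         spec.setPassword("ChangeMe!23");    // read back via getPassword() in the password step

         // Pre-provisioning checks used in the creation flow below.
         spec.verifyClusterNameLength();
         spec.validateNodeGroupNames();
      }
   }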


        this.softwareManagerCollector = softwareManagerCollector;
   }

   public Map<String, Object> getClusterConfigManifest(String clusterName,
         List<String> targets, boolean needAllocIp) {
      ClusterCreate clusterConfig =
            clusterConfigMgr.getClusterConfig(clusterName, needAllocIp);
      Map<String, String> cloudProvider = resMgr.getCloudProviderAttributes();

      ClusterRead read = clusterEntityMgr.toClusterRead(clusterName, true);

      Map<String, Object> attrs = new HashMap<String, Object>();

      if (Constants.IRONFAN.equalsIgnoreCase(clusterConfig.getAppManager())) {
         SoftwareManager softwareManager = clusterConfigMgr.getSoftwareManager(clusterConfig.getAppManager());
         IronfanStack stack = (IronfanStack)filterDistroFromAppManager(softwareManager, clusterConfig.getDistro());
         CommonClusterExpandPolicy.expandDistro(clusterConfig, stack);
        
         attrs.put("cloud_provider", cloudProvider);
         attrs.put("cluster_definition", clusterConfig);        
      }
View Full Code Here


      return clusterEntityMgr.toClusterRead(clusterName);
   }

   public ClusterCreate getClusterSpec(String clusterName) {
      ClusterCreate spec = clusterConfigMgr.getClusterConfig(clusterName);
      spec.setVcClusters(null);
      spec.setTemplateId(null);
      spec.setDistroMap(null);
      spec.setSharedDatastorePattern(null);
      spec.setLocalDatastorePattern(null);
      spec.setNetworkings(null);
      spec.setRpNames(null);
      spec.setDsNames(null);
      spec.setNetworkConfig(null);
      spec.setName(null);
      spec.setDistro(null);
      spec.setValidateConfig(null);
      spec.setSpecFile(null);
      spec.setTopologyPolicy(null);
      spec.setHostToRackMap(null);
      spec.setHttpProxy(null);
      spec.setNoProxy(null);
      spec.setDistroVendor(null);
      spec.setDistroVersion(null);
      spec.setPassword(null);
      NodeGroupCreate[] groups = spec.getNodeGroups();
      if (groups != null) {
         for (NodeGroupCreate group : groups) {
            group.setVcClusters(null);
            group.getStorage().setImagestoreNamePattern(null);
            group.getStorage().setDiskstoreNamePattern(null);
View Full Code Here
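
The getClusterSpec example above strips every infrastructure-specific field (VC clusters, datastore and network patterns, resource pool and datastore names, password, and so on) so that only a portable cluster definition remains. A hedged sketch of what a caller might then do with the sanitized spec, assuming Gson for serialization (an assumption; a later snippet's TypeToken usage suggests Gson is available) and a made-up output path:

   import java.io.FileWriter;
   import java.io.IOException;

   import com.google.gson.Gson;
   import com.google.gson.GsonBuilder;
   import com.vmware.bdd.apitypes.ClusterCreate;

   public class SpecExportSketch {
      // Hypothetical helper: write the sanitized spec to a JSON file.
      // Gson skips null fields by default, so the cleared infrastructure
      // settings simply disappear from the exported document.
      public static void exportSpec(ClusterCreate spec, String path) throws IOException {
         Gson gson = new GsonBuilder().setPrettyPrinting().create();
         try (FileWriter writer = new FileWriter(path)) {
            gson.toJson(spec, writer);
         }
      }
   }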

      createSpec.setDistroVendor(stack.getVendor());
      createSpec.setDistroVersion(stack.getFullVersion());

      // create auto rps if vc cluster/rp is specified
      createAutoRps(createSpec);
      ClusterCreate clusterSpec =
            ClusterSpecFactory.getCustomizedSpec(createSpec, softMgr.getType());
      createSpec.verifyClusterNameLength();
      clusterSpec.validateNodeGroupNames();
      // Check the CPU and memory max configuration according to the VM hardware version
      if (clusterSpec != null && clusterSpec.getNodeGroups() != null) {
         for (NodeGroupCreate ng : clusterSpec.getNodeGroups()) {
            String templateVmId = clusteringService.getTemplateVmId();
            if (templateVmId != null) {
               VcResourceUtils.checkVmMaxConfiguration(templateVmId,
                     ng.getCpuNum() == null ? 0 : ng.getCpuNum(),
                     ng.getMemCapacityMB() == null ? 0 : ng.getMemCapacityMB());
            }
         }
      }
      String name = clusterSpec.getName();
      logger.info("ClusteringService, creating cluster " + name);

      List<String> dsNames = getUsedDS(clusterSpec.getDsNames());
      if (dsNames.isEmpty()) {
         throw ClusterConfigException.NO_DATASTORE_ADDED();
      }
      List<VcCluster> vcClusters = getUsedVcClusters(clusterSpec.getRpNames());
      if (vcClusters == null || vcClusters.isEmpty()) {
         throw ClusterConfigException.NO_RESOURCE_POOL_ADDED();
      }
      // validate accessibility
      validateDatastore(dsNames, vcClusters);
View Full Code Here

   public RepeatStatus executeStep(ChunkContext chunkContext,
         JobExecutionStatusHolder jobExecutionStatusHolder) throws Exception {
      StatusUpdater statusUpdator = new DefaultStatusUpdater(jobExecutionStatusHolder,
            getJobExecutionId(chunkContext));
      String clusterName = getJobParameters(chunkContext).getString(JobConstants.CLUSTER_NAME_JOB_PARAM);
      ClusterCreate clusterSpec = configMgr.getClusterConfig(clusterName);
      List<BaseNode> existingNodes = JobUtils.getExistingNodes(clusterSpec, getClusterEntityMgr());
      List<BaseNode> deletedNodes = new ArrayList<BaseNode>();
      // portgroupName -> Set<ipAddress>
      Map<String, Set<String>> occupiedIpSets = new HashMap<String, Set<String>>();
      JobUtils.separateVcUnreachableNodes(existingNodes, deletedNodes, occupiedIpSets);
View Full Code Here
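
The occupiedIpSets map in the step above records, per port group, which IP addresses are already held by reachable nodes so they are not handed out again. A minimal sketch of that bookkeeping pattern, with made-up port group names and addresses:

   import java.util.HashMap;
   import java.util.HashSet;
   import java.util.Map;
   import java.util.Set;

   public class OccupiedIpSketch {
      public static void main(String[] args) {
         // portgroupName -> Set<ipAddress>, mirroring the comment in the step above
         Map<String, Set<String>> occupiedIpSets = new HashMap<String, Set<String>>();

         // Record an address as occupied (illustrative values only).
         String portGroup = "pg-mgmt";
         Set<String> ips = occupiedIpSets.get(portGroup);
         if (ips == null) {
            ips = new HashSet<String>();
            occupiedIpSets.put(portGroup, ips);
         }
         ips.add("192.168.0.10");

         // An IP allocator would skip any address found in the matching set.
         System.out.println(occupiedIpSets);
      }
   }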

      if (Constants.IRONFAN.equals(appMgrName)) {
         // we do not configure any local repo for Ironfan
         return RepeatStatus.FINISHED;
      }

      ClusterCreate clusterConfig =
            clusterManager.getClusterConfigMgr().getClusterConfig(clusterName);
      String localRepoURL = clusterConfig.getLocalRepoURL();
      logger.info("Use the following URL as the local yum server: "
            + localRepoURL);

      if (!CommonUtil.isBlank(localRepoURL)) {
         // Setup local repo file on each node for ClouderaMgr/Ambari.
View Full Code Here

   public void bootstrapNode(NodeEntity node, String clusterName) {
      String targetName = node.getVmName();

      logger.info("Start to check host time.");
      ClusterCreate clusterSpec = clusterManager.getClusterSpec(clusterName);

      Set<String> hostnames = new HashSet<String>();
      hostnames.add(node.getHostName());
      SoftwareManager softManager =
            softwareManagerCollector
View Full Code Here

      long newInstanceNum =
         getJobParameters(chunkContext).getLong(
               JobConstants.GROUP_INSTANCE_NEW_NUMBER_JOB_PARAM);
      long oldInstanceNum =
         getJobParameters(chunkContext).getLong(
               JobConstants.GROUP_INSTANCE_OLD_NUMBER_JOB_PARAM);
      ClusterCreate clusterSpec = configMgr.getClusterConfig(clusterName);
      List<BaseNode> existingNodes = JobUtils.getExistingNodes(
            clusterSpec, getClusterEntityMgr());
      List<BaseNode> deletedNodes = new ArrayList<BaseNode>();
      removeExcessiveOrWrongStatusNodes(existingNodes,
            deletedNodes, groupName, newInstanceNum, oldInstanceNum);
View Full Code Here

   private static final Logger logger = Logger.getLogger(SetPasswordForNewNodesStep.class);

   @Override
   public RepeatStatus executeStep(ChunkContext chunkContext, JobExecutionStatusHolder jobExecutionStatusHolder) {
      clusterName = getJobParameters(chunkContext).getString(JobConstants.CLUSTER_NAME_JOB_PARAM);
      ClusterCreate clusterSpec = configMgr.getClusterConfig(clusterName);
      String newPassword = clusterSpec.getPassword();

      List<NodeEntity> nodes = getNodesToBeSetPassword(chunkContext);

      if (nodes == null || nodes.isEmpty()) {
         throw TaskException.EXECUTION_FAILED("No nodes needed to set password for");
View Full Code Here

   public RepeatStatus executeStep(ChunkContext chunkContext,
         JobExecutionStatusHolder jobExecutionStatusHolder) throws Exception {
      List<BaseNode> existingNodes = getFromJobExecutionContext(chunkContext,
            JobConstants.CLUSTER_EXISTING_NODES_JOB_PARAM,
            new TypeToken<List<BaseNode>>() {}.getType());
      ClusterCreate clusterSpec = getFromJobExecutionContext(chunkContext,
            JobConstants.CLUSTER_SPEC_JOB_PARAM, ClusterCreate.class);
      UUID reservationId = clusteringService.reserveResource(clusterSpec.getName());
      putIntoJobExecutionContext(chunkContext,
            JobConstants.CLUSTER_RESOURCE_RESERVATION_ID_JOB_PARAM, reservationId);
      List<BaseNode> vNodes = clusteringService.getPlacementPlan(clusterSpec,
            existingNodes);
      putIntoJobExecutionContext(chunkContext,
View Full Code Here

   @Override
   public RepeatStatus executeStep(ChunkContext chunkContext,
         JobExecutionStatusHolder jobExecutionStatusHolder) throws Exception {

      String clusterName = getJobParameters(chunkContext).getString(JobConstants.CLUSTER_NAME_JOB_PARAM);
      ClusterCreate clusterSpec = configMgr.getClusterConfig(clusterName);
      UUID reservationId = clusteringService.reserveResource(clusterName);
      putIntoJobExecutionContext(chunkContext, JobConstants.CLUSTER_RESOURCE_RESERVATION_ID_JOB_PARAM, reservationId);
      List<BaseNode> vNodes = clusteringService.getPlacementPlan(clusterSpec, null);
      putIntoJobExecutionContext(chunkContext, JobConstants.CLUSTER_ADDED_NODES_JOB_PARAM, vNodes);
      putIntoJobExecutionContext(chunkContext, JobConstants.CLUSTER_SPEC_JOB_PARAM, clusterSpec);
View Full Code Here

