
Examples of com.cloud.deploy.DeploymentPlanner.ExcludeList
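
ExcludeList is the deployment planner's running avoid list: it records the data centers, pods, clusters and hosts that must be skipped when computing a DeployDestination (see the getPodsToAvoid(), getClustersToAvoid(), getHostsToAvoid() and shouldAvoid() calls in the excerpts below). The usual pattern is to seed the list with resources already known to be unusable, ask the planner for a destination, add each destination that subsequently fails back into the list, and plan again. The following is a minimal sketch of that retry loop; it reuses only calls that appear in the excerpts, and the fields and the migrate() helper it references are assumptions standing in for whatever the enclosing class provides.

    // Minimal sketch of the ExcludeList retry pattern shown in the excerpts below.
    // The fields it references (profile, plan, planner, _dpMgr, vm, srcHostId, host,
    // s_logger) and the migrate() helper are assumed to be supplied by the enclosing
    // class, as in the CloudStack code these excerpts come from.
    ExcludeList excludes = new ExcludeList();
    excludes.addHost(srcHostId); // never plan onto the host we are migrating away from

    while (true) {
        DeployDestination dest;
        try {
            dest = _dpMgr.planDeployment(profile, plan, excludes, planner);
        } catch (AffinityConflictException e) {
            throw new CloudRuntimeException("Unable to create deployment, affinity rules associated to the VM conflict");
        }

        if (dest == null) {
            // every remaining candidate is now in the exclude list
            throw new InsufficientServerCapacityException("Unable to find a server to migrate to.", host.getClusterId());
        }

        excludes.addHost(dest.getHost().getId()); // never offer the same destination twice
        try {
            migrate(vm, srcHostId, dest);
            return;
        } catch (ResourceUnavailableException e) {
            s_logger.debug("Unable to migrate to unavailable " + dest); // fall through and plan again
        }
    }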


                poolId = rootDiskPool.getId();
            }
        }

        DataCenterDeployment plan = new DataCenterDeployment(host.getDataCenterId(), host.getPodId(), host.getClusterId(), null, poolId, null);
        ExcludeList excludes = new ExcludeList();
        excludes.addHost(hostId);

        DeployDestination dest = null;
        while (true) {

            try {
                dest = _dpMgr.planDeployment(profile, plan, excludes, planner);
            } catch (AffinityConflictException e2) {
                s_logger.warn("Unable to create deployment, affinity rules associted to the VM conflict", e2);
                throw new CloudRuntimeException("Unable to create deployment, affinity rules associted to the VM conflict");
            }

            if (dest != null) {
                if (s_logger.isDebugEnabled()) {
                    s_logger.debug("Found destination " + dest + " for migrating to.");
                }
            } else {
                if (s_logger.isDebugEnabled()) {
                    s_logger.debug("Unable to find destination for migrating the vm " + profile);
                }
                throw new InsufficientServerCapacityException("Unable to find a server to migrate to.", host.getClusterId());
            }

            excludes.addHost(dest.getHost().getId());
            try {
                migrate(vm, srcHostId, dest);
                return;
            } catch (ResourceUnavailableException e) {
                s_logger.debug("Unable to migrate to unavailable " + dest);


    }

    @Test
    public void checkWhenDcInAvoidList() throws InsufficientServerCapacityException {
        DataCenterVO mockDc = mock(DataCenterVO.class);
        ExcludeList avoids = mock(ExcludeList.class);
        VirtualMachineProfileImpl vmProfile = mock(VirtualMachineProfileImpl.class);
        VMInstanceVO vm = mock(VMInstanceVO.class);
        DataCenterDeployment plan = mock(DataCenterDeployment.class);

        when(avoids.shouldAvoid(mockDc)).thenReturn(true);
        when(vmProfile.getVirtualMachine()).thenReturn(vm);
        when(vm.getDataCenterId()).thenReturn(1L);
        when(dcDao.findById(1L)).thenReturn(mockDc);

        List<Long> clusterList = planner.orderClusters(vmProfile, plan, avoids);

    @Test
    public void checkStrictModeWithCurrentAccountVmsPresent() throws InsufficientServerCapacityException {
        VirtualMachineProfileImpl vmProfile = mock(VirtualMachineProfileImpl.class);
        DataCenterDeployment plan = mock(DataCenterDeployment.class);
        ExcludeList avoids = new ExcludeList();

        initializeForTest(vmProfile, plan);

        initializeForImplicitPlannerTest(false);

        List<Long> clusterList = planner.orderClusters(vmProfile, plan, avoids);

        // Validations.
        // Check that clusters 2 and 3 are not in the cluster list.
        // Hosts 6 and 7 should also be in the avoid list.
        assertFalse("Cluster list should not be null/empty", (clusterList == null || clusterList.isEmpty()));
        boolean foundNeededCluster = false;
        for (Long cluster : clusterList) {
            if (cluster != 1) {
                fail("Found a cluster that shouldn't have been present, cluster id : " + cluster);
            } else {
                foundNeededCluster = true;
            }
        }
        assertTrue("Didn't find cluster 1 in the list. It should have been present", foundNeededCluster);

        Set<Long> hostsInAvoidList = avoids.getHostsToAvoid();
        assertFalse("Host 5 shouldn't have be in the avoid list, but it is present", hostsInAvoidList.contains(5L));
        Set<Long> hostsThatShouldBeInAvoidList = new HashSet<Long>();
        hostsThatShouldBeInAvoidList.add(6L);
        hostsThatShouldBeInAvoidList.add(7L);
        assertTrue("Hosts 6 and 7 that should have been present were not found in avoid list", hostsInAvoidList.containsAll(hostsThatShouldBeInAvoidList));

    @Test
    public void checkStrictModeHostWithCurrentAccountVmsFull() throws InsufficientServerCapacityException {
        @SuppressWarnings("unchecked")
        VirtualMachineProfileImpl vmProfile = mock(VirtualMachineProfileImpl.class);
        DataCenterDeployment plan = mock(DataCenterDeployment.class);
        ExcludeList avoids = new ExcludeList();

        initializeForTest(vmProfile, plan);

        initializeForImplicitPlannerTest(false);

        // Mark host 5, which has the current account's VMs, to be in the avoid list.
        avoids.addHost(5L);
        List<Long> clusterList = planner.orderClusters(vmProfile, plan, avoids);

        // Validations.
        // Check that clusters 1 and 3 are not in the cluster list.
        // Hosts 5 and 7 should also be in the avoid list.
        assertFalse("Cluster list should not be null/empty", (clusterList == null || clusterList.isEmpty()));
        boolean foundNeededCluster = false;
        for (Long cluster : clusterList) {
            if (cluster != 2) {
                fail("Found a cluster that shouldn't have been present, cluster id : " + cluster);
            } else {
                foundNeededCluster = true;
            }
        }
        assertTrue("Didn't find cluster 2 in the list. It should have been present", foundNeededCluster);

        Set<Long> hostsInAvoidList = avoids.getHostsToAvoid();
        assertFalse("Host 6 shouldn't have be in the avoid list, but it is present", hostsInAvoidList.contains(6L));
        Set<Long> hostsThatShouldBeInAvoidList = new HashSet<Long>();
        hostsThatShouldBeInAvoidList.add(5L);
        hostsThatShouldBeInAvoidList.add(7L);
        assertTrue("Hosts 5 and 7 that should have been present were not found in avoid list", hostsInAvoidList.containsAll(hostsThatShouldBeInAvoidList));

    @Test
    public void checkStrictModeNoHostsAvailable() throws InsufficientServerCapacityException {
        @SuppressWarnings("unchecked")
        VirtualMachineProfileImpl vmProfile = mock(VirtualMachineProfileImpl.class);
        DataCenterDeployment plan = mock(DataCenterDeployment.class);
        ExcludeList avoids = new ExcludeList();

        initializeForTest(vmProfile, plan);

        initializeForImplicitPlannerTest(false);

        // Mark hosts 5 and 6 to be in the avoid list.
        avoids.addHost(5L);
        avoids.addHost(6L);
        List<Long> clusterList = planner.orderClusters(vmProfile, plan, avoids);

        // Validations.
        // Check cluster list is empty.
        assertTrue("Cluster list should not be null/empty", (clusterList == null || clusterList.isEmpty()));

    @Test
    public void checkPreferredModePreferredHostAvailable() throws InsufficientServerCapacityException {
        @SuppressWarnings("unchecked")
        VirtualMachineProfileImpl vmProfile = mock(VirtualMachineProfileImpl.class);
        DataCenterDeployment plan = mock(DataCenterDeployment.class);
        ExcludeList avoids = new ExcludeList();

        initializeForTest(vmProfile, plan);

        initializeForImplicitPlannerTest(true);

        // Mark hosts 5 and 6 to be in the avoid list.
        avoids.addHost(5L);
        avoids.addHost(6L);
        List<Long> clusterList = planner.orderClusters(vmProfile, plan, avoids);

        // Validations.
        // Check that clusters 1 and 2 are not in the cluster list.
        // Hosts 5 and 6 should also be in the avoid list.
        assertFalse("Cluster list should not be null/empty", (clusterList == null || clusterList.isEmpty()));
        boolean foundNeededCluster = false;
        for (Long cluster : clusterList) {
            if (cluster != 3) {
                fail("Found a cluster that shouldn't have been present, cluster id : " + cluster);
            } else {
                foundNeededCluster = true;
            }
        }
        assertTrue("Didn't find cluster 3 in the list. It should have been present", foundNeededCluster);

        Set<Long> hostsInAvoidList = avoids.getHostsToAvoid();
        assertFalse("Host 7 shouldn't have be in the avoid list, but it is present", hostsInAvoidList.contains(7L));
        Set<Long> hostsThatShouldBeInAvoidList = new HashSet<Long>();
        hostsThatShouldBeInAvoidList.add(5L);
        hostsThatShouldBeInAvoidList.add(6L);
        assertTrue("Hosts 5 and 6 that should have been present were not found in avoid list", hostsInAvoidList.containsAll(hostsThatShouldBeInAvoidList));

    @Test
    public void checkPreferredModeNoHostsAvailable() throws InsufficientServerCapacityException {
        @SuppressWarnings("unchecked")
        VirtualMachineProfileImpl vmProfile = mock(VirtualMachineProfileImpl.class);
        DataCenterDeployment plan = mock(DataCenterDeployment.class);
        ExcludeList avoids = new ExcludeList();

        initializeForTest(vmProfile, plan);

        initializeForImplicitPlannerTest(true);

        // Mark hosts 5, 6 and 7 to be in the avoid list.
        avoids.addHost(5L);
        avoids.addHost(6L);
        avoids.addHost(7L);
        List<Long> clusterList = planner.orderClusters(vmProfile, plan, avoids);

        // Validations.
        // Check cluster list is empty.
        assertTrue("Cluster list should not be null/empty", (clusterList == null || clusterList.isEmpty()));

        }

        HypervisorGuru hvGuru = _hvGuruMgr.getGuru(vm.getHypervisorType());

        boolean canRetry = true;
        ExcludeList avoids = null;
        try {
            Journal journal = start.second().getJournal();


            if (planToDeploy != null) {
                avoids = planToDeploy.getAvoids();
            }
            if (avoids == null) {
                avoids = new ExcludeList();
            }
            if (s_logger.isDebugEnabled()) {
                s_logger.debug("Deploy avoids pods: " + avoids.getPodsToAvoid() + ", clusters: " + avoids.getClustersToAvoid() + ", hosts: " + avoids.getHostsToAvoid());
            }


            boolean planChangedByVolume = false;
            boolean reuseVolume = true;
            DataCenterDeployment originalPlan = plan;

            int retry = _retry;
            while (retry-- != 0) { // It's != so that it can match -1.

                if (reuseVolume) {
                    // edit plan if this vm's ROOT volume is in READY state already
                    List<VolumeVO> vols = _volsDao.findReadyRootVolumesByInstance(vm.getId());
                    for (VolumeVO vol : vols) {
                        // make sure the templateId is unchanged; if it has changed, let the
                        // planner reassign a pool for the volume even if it is ready.
                        Long volTemplateId = vol.getTemplateId();
                        if (volTemplateId != null && volTemplateId.longValue() != template.getId()) {
                            if (s_logger.isDebugEnabled()) {
                                s_logger.debug(vol + " of " + vm + " is READY, but template ids don't match, let the planner reassign a new pool");
                            }
                            continue;
                        }

                        StoragePool pool = (StoragePool)dataStoreMgr.getPrimaryDataStore(vol.getPoolId());
                        if (!pool.isInMaintenance()) {
                            if (s_logger.isDebugEnabled()) {
                                s_logger.debug("Root volume is ready, need to place VM in volume's cluster");
                            }
                            long rootVolDcId = pool.getDataCenterId();
                            Long rootVolPodId = pool.getPodId();
                            Long rootVolClusterId = pool.getClusterId();
                            if (planToDeploy != null && planToDeploy.getDataCenterId() != 0) {
                                Long clusterIdSpecified = planToDeploy.getClusterId();
                                if (clusterIdSpecified != null && rootVolClusterId != null) {
                                    if (rootVolClusterId.longValue() != clusterIdSpecified.longValue()) {
                                        // cannot satisfy the plan passed in to the planner
                                        if (s_logger.isDebugEnabled()) {
                                            s_logger.debug("Cannot satisfy the deployment plan passed in since the ready Root volume is in different cluster. volume's cluster: " + rootVolClusterId
                                                    + ", cluster specified: " + clusterIdSpecified);
                                        }
                                        throw new ResourceUnavailableException("Root volume is ready in different cluster, Deployment plan provided cannot be satisfied, unable to create a deployment for "
                                                + vm, Cluster.class, clusterIdSpecified);
                                    }
                                }
                                plan = new DataCenterDeployment(planToDeploy.getDataCenterId(), planToDeploy.getPodId(), planToDeploy.getClusterId(), planToDeploy.getHostId(), vol.getPoolId(), null, ctx);
                            } else {
                                plan = new DataCenterDeployment(rootVolDcId, rootVolPodId, rootVolClusterId, null, vol.getPoolId(), null, ctx);
                                if (s_logger.isDebugEnabled()) {
                                    s_logger.debug(vol + " is READY, changing deployment plan to use this pool's dcId: " + rootVolDcId + " , podId: " + rootVolPodId + " , and clusterId: " + rootVolClusterId);
                                }
                                planChangedByVolume = true;
                            }
                        }
                    }
                }

                VirtualMachineProfileImpl<T> vmProfile = new VirtualMachineProfileImpl<T>(vm, template, offering, account, params);
                DeployDestination dest = null;
                try {
                    dest = _dpMgr.planDeployment(vmProfile, plan, avoids, planner);
                } catch (AffinityConflictException e2) {
                    s_logger.warn("Unable to create deployment, affinity rules associted to the VM conflict", e2);
                    throw new CloudRuntimeException(
                            "Unable to create deployment, affinity rules associted to the VM conflict");

                }

                if (dest == null) {
                    if (planChangedByVolume) {
                        plan = originalPlan;
                        planChangedByVolume = false;
                        // do not reuse the volume on the next retry, since we want to look for resources outside the volume's cluster
                        reuseVolume = false;
                        continue;
                    }
                    throw new InsufficientServerCapacityException("Unable to create a deployment for " + vmProfile,
                            DataCenter.class, plan.getDataCenterId(), areAffinityGroupsAssociated(vmProfile));
                }

                if (dest != null) {
                    avoids.addHost(dest.getHost().getId());
                    journal.record("Deployment found ", vmProfile, dest);
                }

                long destHostId = dest.getHost().getId();
                vm.setPodId(dest.getPod().getId());
                Long cluster_id = dest.getCluster().getId();
                ClusterDetailsVO cluster_detail_cpu =  _clusterDetailsDao.findDetail(cluster_id,"cpuOvercommitRatio");
                ClusterDetailsVO cluster_detail_ram =  _clusterDetailsDao.findDetail(cluster_id,"memoryOvercommitRatio");
                // store the overcommit values in the vm_details table so that capacity checks still work if the cluster overcommit ratio is changed later.
                if (_uservmDetailsDao.findDetail(vm.getId(),"cpuOvercommitRatio") == null && ((Float.parseFloat(cluster_detail_cpu.getValue()) > 1f || Float.parseFloat(cluster_detail_ram.getValue()) > 1f)) ){
                    UserVmDetailVO vmDetail_cpu = new UserVmDetailVO(vm.getId(), "cpuOvercommitRatio", cluster_detail_cpu.getValue());
                    UserVmDetailVO vmDetail_ram = new UserVmDetailVO(vm.getId(), "memoryOvercommitRatio", cluster_detail_ram.getValue());
                    _uservmDetailsDao.persist(vmDetail_cpu);
                    _uservmDetailsDao.persist(vmDetail_ram);
                }
                else if (_uservmDetailsDao.findDetail(vm.getId(),"cpuOvercommitRatio") != null) {
                    UserVmDetailVO vmDetail_cpu = _uservmDetailsDao.findDetail(vm.getId(), "cpuOvercommitRatio");
                    vmDetail_cpu.setValue(cluster_detail_cpu.getValue());
                    UserVmDetailVO vmDetail_ram = _uservmDetailsDao.findDetail(vm.getId(), "memoryOvercommitRatio");
                    vmDetail_ram.setValue(cluster_detail_ram.getValue());
                    _uservmDetailsDao.update(vmDetail_cpu.getId(), vmDetail_cpu);
                    _uservmDetailsDao.update(vmDetail_ram.getId(), vmDetail_ram);
                }
                vmProfile.setCpuOvercommitRatio(Float.parseFloat(cluster_detail_cpu.getValue()));
                vmProfile.setMemoryOvercommitRatio(Float.parseFloat(cluster_detail_ram.getValue()));
                StartAnswer startAnswer = null;

                try {
                    if (!changeState(vm, Event.OperationRetry, destHostId, work, Step.Prepare)) {
                        throw new ConcurrentOperationException("Unable to update the state of the Virtual Machine");
                    }
                } catch (NoTransitionException e1) {
                    throw new ConcurrentOperationException(e1.getMessage());
                }

                try {
                    if (s_logger.isDebugEnabled()) {
                        s_logger.debug("VM is being created in podId: " + vm.getPodIdToDeployIn());
                    }
                    _networkMgr.prepare(vmProfile, dest, ctx);
                    if (vm.getHypervisorType() != HypervisorType.BareMetal) {
                        volumeMgr.prepare(vmProfile, dest);
                    }
                    // since the storage manager succeeded in volume creation, reuse the volume for further tries while the current cluster has capacity
                    if (!reuseVolume) {
                        reuseVolume = true;
                    }

                    Commands cmds = null;
                    vmGuru.finalizeVirtualMachineProfile(vmProfile, dest, ctx);

                    VirtualMachineTO vmTO = hvGuru.implement(vmProfile);

                    cmds = new Commands(OnError.Stop);
                    StartCommand strtcmd = new StartCommand(vmTO, dest.getHost(), _mgmtServer.getExecuteInSequence());

                    cmds.addCommand(strtcmd);

                    vmGuru.finalizeDeployment(cmds, vmProfile, dest, ctx);


                    work = _workDao.findById(work.getId());
                    if (work == null || work.getStep() != Step.Prepare) {
                        throw new ConcurrentOperationException("Work steps have been changed: " + work);
                    }

                    _workDao.updateStep(work, Step.Starting);

                    _agentMgr.send(destHostId, cmds);

                    _workDao.updateStep(work, Step.Started);


                    startAnswer = cmds.getAnswer(StartAnswer.class);
                    if (startAnswer != null && startAnswer.getResult()) {
                        String host_guid = startAnswer.getHost_guid();
                        if( host_guid != null ) {
                            HostVO finalHost = _resourceMgr.findHostByGuid(host_guid);
                            if (finalHost == null ) {
                                throw new CloudRuntimeException("Host Guid " + host_guid + " doesn't exist in DB, something wrong here");
                            }
                            destHostId = finalHost.getId();
                        }
                        if (vmGuru.finalizeStart(vmProfile, destHostId, cmds, ctx)) {
                            syncDiskChainChange(startAnswer);
                           
                            if (!changeState(vm, Event.OperationSucceeded, destHostId, work, Step.Done)) {
                                throw new ConcurrentOperationException("Unable to transition to a new state.");
                            }

                            startedVm = vm;
                            if (s_logger.isDebugEnabled()) {
                                s_logger.debug("Start completed for VM " + vm);
                            }
                            return startedVm;
                        } else {
                            if (s_logger.isDebugEnabled()) {
                                s_logger.debug("The guru did not like the answers so stopping " + vm);
                            }

                            StopCommand cmd = new StopCommand(vm, _mgmtServer.getExecuteInSequence());
                            StopAnswer answer = (StopAnswer) _agentMgr.easySend(destHostId, cmd);
                            if (answer != null) {
                                if (vm.getType() == VirtualMachine.Type.User) {
                                    String platform = answer.getPlatform();
                                    if (platform != null) {
                                        UserVmVO userVm = _userVmDao.findById(vm.getId());
                                        _userVmDao.loadDetails(userVm);
                                        userVm.setDetail("platform",  platform);
                                        _userVmDao.saveDetails(userVm);
                                    }
                                }
                            }

                            if (answer == null || !answer.getResult()) {
                                s_logger.warn("Unable to stop " + vm + " due to " + (answer != null ? answer.getDetails() : "no answers"));
                                _haMgr.scheduleStop(vm, destHostId, WorkType.ForceStop);
                                throw new ExecutionException("Unable to stop " + vm + " so we are unable to retry the start operation");
                            }
                            throw new ExecutionException("Unable to start " + vm + " due to error in finalizeStart, not retrying");
                        }
                    }
                    s_logger.info("Unable to start VM on " + dest.getHost() + " due to " + (startAnswer == null ? " no start answer" : startAnswer.getDetails()));

                } catch (OperationTimedoutException e) {
                    s_logger.debug("Unable to send the start command to host " + dest.getHost());
                    if (e.isActive()) {
                        _haMgr.scheduleStop(vm, destHostId, WorkType.CheckStop);
                    }
                    canRetry = false;
                    throw new AgentUnavailableException("Unable to start " + vm.getHostName(), destHostId, e);
                } catch (ResourceUnavailableException e) {
                    s_logger.info("Unable to contact resource.", e);
                    if (!avoids.add(e)) {
                        if (e.getScope() == Volume.class || e.getScope() == Nic.class) {
                            throw e;
                        } else {
                            s_logger.warn("unexpected ResourceUnavailableException : " + e.getScope().getName(), e);
                            throw e;
                        }
                    }
                } catch (InsufficientCapacityException e) {
                    s_logger.info("Insufficient capacity ", e);
                    if (!avoids.add(e)) {
                        if (e.getScope() == Volume.class || e.getScope() == Nic.class) {
                            throw e;
                        } else {
                            s_logger.warn("unexpected InsufficientCapacityException : " + e.getScope().getName(), e);
                        }

                }
                // Find a suitable pool for the volume. Call the storage pool allocator to find the list of pools.
                DiskProfile diskProfile = new DiskProfile(volume, diskOffering, profile.getHypervisorType());
                DataCenterDeployment plan = new DataCenterDeployment(host.getDataCenterId(), host.getPodId(),
                        host.getClusterId(), host.getId(), null, null);
                ExcludeList avoid = new ExcludeList();
                boolean currentPoolAvailable = false;

                for (StoragePoolAllocator allocator : _storagePoolAllocators) {
                    List<StoragePool> poolList = allocator.allocateToPool(diskProfile, profile, plan, avoid,
                            StoragePoolAllocator.RETURN_UPTO_ALL);

        }

        Host host = _hostDao.findById(hostId);

        DataCenterDeployment plan = new DataCenterDeployment(host.getDataCenterId(), host.getPodId(), host.getClusterId(), null, null, null);
        ExcludeList excludes = new ExcludeList();
        excludes.addHost(hostId);

        DeployDestination dest = null;
        while (true) {

            try {
                dest = _dpMgr.planDeployment(profile, plan, excludes, planner);
            } catch (AffinityConflictException e2) {
                s_logger.warn("Unable to create deployment, affinity rules associted to the VM conflict", e2);
                throw new CloudRuntimeException(
                        "Unable to create deployment, affinity rules associted to the VM conflict");
            }

            if (dest != null) {
                if (s_logger.isDebugEnabled()) {
                    s_logger.debug("Found destination " + dest + " for migrating to.");
                }
            } else {
                if (s_logger.isDebugEnabled()) {
                    s_logger.debug("Unable to find destination for migrating the vm " + profile);
                }
                throw new InsufficientServerCapacityException("Unable to find a server to migrate to.", host.getClusterId());
            }

            excludes.addHost(dest.getHost().getId());
            VMInstanceVO vmInstance = null;
            try {
                vmInstance = migrate(vm, srcHostId, dest);
            } catch (ResourceUnavailableException e) {
                s_logger.debug("Unable to migrate to unavailable " + dest);
