Package com.cloud.deploy, class DeploymentPlanner.ExcludeList

Examples of com.cloud.deploy.DeploymentPlanner.ExcludeList
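The snippets on this page exercise a small, recurring subset of the ExcludeList API: the no-argument constructor, the five-argument constructor seeded from another list's sets, the add*/get*ToAvoid methods, shouldAvoid(...) and add(exception). As a quick orientation, here is a minimal sketch assembled only from those calls; the helper class and the ids are hypothetical placeholders, and this is a usage sketch rather than the class's full contract.

    import com.cloud.deploy.DeploymentPlanner.ExcludeList;

    // Hypothetical helper class, only to show the calls in one place.
    class ExcludeListExample {
        static ExcludeList buildSeededAvoids(long hostId, long clusterId, long podId, long poolId) {
            ExcludeList avoids = new ExcludeList();
            avoids.addHost(hostId);        // skip a specific host
            avoids.addCluster(clusterId);  // skip a whole cluster
            avoids.addPod(podId);          // skip a whole pod
            avoids.addPool(poolId);        // skip a storage pool

            // Seed a fresh list from an existing one's sets; the storage allocator
            // examples below use this pattern before adding their own exclusions.
            return new ExcludeList(avoids.getDataCentersToAvoid(), avoids.getPodsToAvoid(),
                    avoids.getClustersToAvoid(), avoids.getHostsToAvoid(), avoids.getPoolsToAvoid());
        }
    }

Planners and allocators consult such a list while searching (for example avoids.shouldAvoid(dc) in the planner tests near the end of this page) and can also grow it from a capacity exception via avoids.add(e), as the deployment retry loop further down shows.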


        } else {
            allocator = _firstFitStoragePoolAllocator;
        }

        // Try to find a storage pool after cleanup
        ExcludeList myAvoids = new ExcludeList(avoid.getDataCentersToAvoid(), avoid.getPodsToAvoid(), avoid.getClustersToAvoid(), avoid.getHostsToAvoid(), avoid.getPoolsToAvoid());

        return allocator.allocateToPool(dskCh, vmProfile, plan, myAvoids, returnUpTo);
    }


        // Check that the allocator type is correct
        if (!allocatorIsCorrectType(dskCh)) {
            return suitablePools;
        }

        ExcludeList myAvoids = new ExcludeList(avoid.getDataCentersToAvoid(), avoid.getPodsToAvoid(), avoid.getClustersToAvoid(), avoid.getHostsToAvoid(), avoid.getPoolsToAvoid());

        if (s_logger.isDebugEnabled()) {
            s_logger.debug("LocalStoragePoolAllocator trying to find storage pool to fit the vm");
        }

        // data disk and host identified from deploying vm (attach volume case)
        if (dskCh.getType() == Volume.Type.DATADISK && plan.getHostId() != null) {
            List<StoragePoolHostVO> hostPools = _poolHostDao.listByHostId(plan.getHostId());
            for (StoragePoolHostVO hostPool: hostPools) {
                StoragePoolVO pool = _storagePoolDao.findById(hostPool.getPoolId());
                if (pool != null && pool.isLocal()) {
                    s_logger.debug("Found suitable local storage pool " + pool.getId() + ", adding to list");
                    suitablePools.add(pool);
                }

                if (suitablePools.size() == returnUpTo) {
                    break;
                }
            }
        } else {
            List<StoragePool> availablePool;
            while (!(availablePool = super.allocateToPool(dskCh, vmProfile, plan, myAvoids, 1)).isEmpty()) {
                StoragePool pool = availablePool.get(0);
                myAvoids.addPool(pool.getId());
                List<StoragePoolHostVO> hostsInSPool = _poolHostDao.listByPoolId(pool.getId());
                assert (hostsInSPool.size() == 1) : "Local storage pool should be one host per pool";

                s_logger.debug("Found suitable local storage pool " + pool.getId() + ", adding to list");
                suitablePools.add(pool);
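The while loop just above ends mid-body in this excerpt, but its shape is a common ExcludeList idiom: ask the parent allocator for one pool at a time and immediately add each result to the local list, so the next call is forced to return a different candidate. Reduced to its core (names are taken from the snippet, and the returnUpTo bound is assumed to mirror the data-disk branch, since the original loop is cut off here):

    List<StoragePool> found = new ArrayList<StoragePool>();
    List<StoragePool> next;
    // Request a single pool per call; excluding each result moves the parent
    // allocator on to a different candidate on the next iteration.
    while (found.size() < returnUpTo
            && !(next = super.allocateToPool(dskCh, vmProfile, plan, myAvoids, 1)).isEmpty()) {
        StoragePool pool = next.get(0);
        myAvoids.addPool(pool.getId());   // never offered again by later calls
        found.add(pool);
    }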

        List<Host> suitableHosts = new ArrayList<Host>();

        VirtualMachineProfile<VMInstanceVO> vmProfile = new VirtualMachineProfileImpl<VMInstanceVO>(vm);

        DataCenterDeployment plan = new DataCenterDeployment(srcHost.getDataCenterId(), srcHost.getPodId(), srcHost.getClusterId(), null, null, null);
        ExcludeList excludes = new ExcludeList();
        excludes.addHost(srcHostId);

        for (HostAllocator allocator : _hostAllocators) {
            suitableHosts = allocator.allocateTo(vmProfile, plan, Host.Type.Routing, excludes, HostAllocator.RETURN_UPTO_ALL, false);
            if (suitableHosts != null && !suitableHosts.isEmpty()) {
                break;

            return this.start(router, user, caller, params, null);
        }
        // Try our best to deploy the router somewhere away from the existing redundant router
        int retryIndex = 5;
        ExcludeList[] avoids = new ExcludeList[5];
        avoids[0] = new ExcludeList();
        avoids[0].addPod(routerToBeAvoid.getPodIdToDeployIn());
        avoids[1] = new ExcludeList();
        avoids[1].addCluster(_hostDao.findById(routerToBeAvoid.getHostId()).getClusterId());
        avoids[2] = new ExcludeList();
        List<VolumeVO> volumes = _volumeDao.findByInstanceAndType(routerToBeAvoid.getId(), Type.ROOT);
        if (volumes != null && volumes.size() != 0) {
            avoids[2].addPool(volumes.get(0).getPoolId());
        }
        avoids[2].addHost(routerToBeAvoid.getHostId());
        avoids[3] = new ExcludeList();
        avoids[3].addHost(routerToBeAvoid.getHostId());
        avoids[4] = new ExcludeList();

        for (int i = 0; i < retryIndex; i++) {
            if (s_logger.isTraceEnabled()) {
                s_logger.trace("Try to deploy redundant virtual router:" + router.getHostName() + ", for " + i + " time");
            }
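The avoids array built above encodes a progressive relaxation of placement constraints; each retry presumably passes the next, weaker, list when redeploying the router (the loop body is cut off before that call, so the ordering below is an interpretation of the array contents, not project code):

    // avoids[0]: avoid the existing router's pod            -> force a different pod
    // avoids[1]: avoid the existing router's cluster        -> same pod allowed, different cluster
    // avoids[2]: avoid its root-volume pool and its host    -> same cluster allowed, different storage and host
    // avoids[3]: avoid only its host                        -> storage may be shared, the host may not
    // avoids[4]: empty                                      -> last resort, no placement restriction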

        }
        if (plannerName == null) {
            throw new CloudRuntimeException(String.format("cannot find DeployPlannerSelector for vm[uuid:%s, hypervisorType:%s]", vm.getUuid(), vm.getHypervisorType()));
        }
       
        String reservationId = vmEntity.reserve(plannerName, plan, new ExcludeList(), new Long(callerUser.getId()).toString());
        vmEntity.deploy(reservationId, new Long(callerUser.getId()).toString(), params);

        Pair<UserVmVO, Map<VirtualMachineProfile.Param, Object>> vmParamPair = new Pair(vm, params);
        if (vm != null && vm.isUpdateParameters()) {
            // this value is not sent to the backend; it is needed only for the API

        boolean canRetry = true;
        try {
            Journal journal = start.second().getJournal();

            ExcludeList avoids = null;
            if (planToDeploy != null) {
                avoids = planToDeploy.getAvoids();
            }
            if (avoids == null) {
                avoids = new ExcludeList();
            }
            if (s_logger.isDebugEnabled()) {
                s_logger.debug("Deploy avoids pods: " + avoids.getPodsToAvoid() + ", clusters: " + avoids.getClustersToAvoid() + ", hosts: " + avoids.getHostsToAvoid());
            }


            boolean planChangedByVolume = false;
            boolean reuseVolume = true;
            DataCenterDeployment originalPlan = plan;

            int retry = _retry;
            while (retry-- != 0) { // != rather than > 0, so a retry value of -1 still enters the loop

                if(reuseVolume){
                    // edit plan if this vm's ROOT volume is in READY state already
                    List<VolumeVO> vols = _volsDao.findReadyRootVolumesByInstance(vm.getId());
                    for (VolumeVO vol : vols) {
                        // make sure the templateId is unchanged; if it has changed,
                        // let the planner reassign a pool for the volume even if it is ready.
                        Long volTemplateId = vol.getTemplateId();
                        if (volTemplateId != null && volTemplateId.longValue() != template.getId()) {
                            if (s_logger.isDebugEnabled()) {
                                s_logger.debug(vol + " of " + vm + " is READY, but template ids don't match, let the planner reassign a new pool");
                            }
                            continue;
                        }

                        StoragePoolVO pool = _storagePoolDao.findById(vol.getPoolId());
                        if (!pool.isInMaintenance()) {
                            if (s_logger.isDebugEnabled()) {
                                s_logger.debug("Root volume is ready, need to place VM in volume's cluster");
                            }
                            long rootVolDcId = pool.getDataCenterId();
                            Long rootVolPodId = pool.getPodId();
                            Long rootVolClusterId = pool.getClusterId();
                            if (planToDeploy != null && planToDeploy.getDataCenterId() != 0) {
                                Long clusterIdSpecified = planToDeploy.getClusterId();
                                if (clusterIdSpecified != null && rootVolClusterId != null) {
                                    if (rootVolClusterId.longValue() != clusterIdSpecified.longValue()) {
                                        // cannot satisfy the plan passed in to the
                                        // planner
                                        if (s_logger.isDebugEnabled()) {
                                            s_logger.debug("Cannot satisfy the deployment plan passed in since the ready Root volume is in different cluster. volume's cluster: " + rootVolClusterId
                                                    + ", cluster specified: " + clusterIdSpecified);
                                        }
                                        throw new ResourceUnavailableException("Root volume is ready in different cluster, Deployment plan provided cannot be satisfied, unable to create a deployment for "
                                                + vm, Cluster.class, clusterIdSpecified);
                                    }
                                }
                                plan = new DataCenterDeployment(planToDeploy.getDataCenterId(), planToDeploy.getPodId(), planToDeploy.getClusterId(), planToDeploy.getHostId(), vol.getPoolId(), null, ctx);
                            }else{
                                plan = new DataCenterDeployment(rootVolDcId, rootVolPodId, rootVolClusterId, null, vol.getPoolId(), null, ctx);
                                if (s_logger.isDebugEnabled()) {
                                    s_logger.debug(vol + " is READY, changing deployment plan to use this pool's dcId: " + rootVolDcId + " , podId: " + rootVolPodId + " , and clusterId: " + rootVolClusterId);
                                }
                                planChangedByVolume = true;
                            }
                        }
                    }
                }

                VirtualMachineProfileImpl<T> vmProfile = new VirtualMachineProfileImpl<T>(vm, template, offering, account, params);
                DeployDestination dest = null;
                for (DeploymentPlanner planner : _planners) {
                    if (planner.canHandle(vmProfile, plan, avoids)) {
                        dest = planner.plan(vmProfile, plan, avoids);
                    } else {
                        continue;
                    }
                    if (dest != null) {
                        avoids.addHost(dest.getHost().getId());
                        journal.record("Deployment found ", vmProfile, dest);
                        break;
                    }
                }

                if (dest == null) {
                    if (planChangedByVolume) {
                        plan = originalPlan;
                        planChangedByVolume = false;
                        // do not reuse the volume on the next retry, since we want to look for resources outside the volume's cluster
                        reuseVolume = false;
                        continue;
                    }
                    throw new InsufficientServerCapacityException("Unable to create a deployment for " + vmProfile, DataCenter.class, plan.getDataCenterId());
                }

                long destHostId = dest.getHost().getId();
                vm.setPodId(dest.getPod().getId());

                try {
                    if (!changeState(vm, Event.OperationRetry, destHostId, work, Step.Prepare)) {
                        throw new ConcurrentOperationException("Unable to update the state of the Virtual Machine");
                    }
                } catch (NoTransitionException e1) {
                    throw new ConcurrentOperationException(e1.getMessage());
                }

                try {
                    if (s_logger.isDebugEnabled()) {
                        s_logger.debug("VM is being created in podId: " + vm.getPodIdToDeployIn());
                    }
                    _networkMgr.prepare(vmProfile, dest, ctx);
                    if (vm.getHypervisorType() != HypervisorType.BareMetal) {
                        _storageMgr.prepare(vmProfile, dest);
                    }
                    // since StorageMgr succeeded in volume creation, keep reusing the volume on further tries as long as the current cluster has capacity
                    if(!reuseVolume){
                        reuseVolume = true;
                    }

                    Commands cmds = null;
                    vmGuru.finalizeVirtualMachineProfile(vmProfile, dest, ctx);

                    VirtualMachineTO vmTO = hvGuru.implement(vmProfile);

                    cmds = new Commands(OnError.Stop);
                    cmds.addCommand(new StartCommand(vmTO, dest.getHost()));

                    vmGuru.finalizeDeployment(cmds, vmProfile, dest, ctx);


                    work = _workDao.findById(work.getId());
                    if (work == null || work.getStep() != Step.Prepare) {
                        throw new ConcurrentOperationException("Work steps have been changed: " + work);
                    }
                    _workDao.updateStep(work, Step.Starting);

                    _agentMgr.send(destHostId, cmds);

                    _workDao.updateStep(work, Step.Started);


                    StartAnswer startAnswer = cmds.getAnswer(StartAnswer.class);
                    if (startAnswer != null && startAnswer.getResult()) {
                        String host_guid = startAnswer.getHost_guid();
                        if( host_guid != null ) {
                            HostVO finalHost = _resourceMgr.findHostByGuid(host_guid);
                            if (finalHost == null ) {
                                throw new CloudRuntimeException("Host Guid " + host_guid + " doesn't exist in DB, something wrong here");
                            }
                            destHostId = finalHost.getId();
                        }
                        if (vmGuru.finalizeStart(vmProfile, destHostId, cmds, ctx)) {
                            if (!changeState(vm, Event.OperationSucceeded, destHostId, work, Step.Done)) {
                                throw new ConcurrentOperationException("Unable to transition to a new state.");
                            }
                            startedVm = vm;
                            if (s_logger.isDebugEnabled()) {
                                s_logger.debug("Start completed for VM " + vm);
                            }
                            return startedVm;
                        } else {
                            if (s_logger.isDebugEnabled()) {
                                s_logger.info("The guru did not like the answers so stopping " + vm);
                            }

                            StopCommand cmd = new StopCommand(vm.getInstanceName());
                            StopAnswer answer = (StopAnswer) _agentMgr.easySend(destHostId, cmd);
                            if (answer == null || !answer.getResult()) {
                                s_logger.warn("Unable to stop " + vm + " due to " + (answer != null ? answer.getDetails() : "no answers"));
                                _haMgr.scheduleStop(vm, destHostId, WorkType.ForceStop);
                                throw new ExecutionException("Unable to stop " + vm + " so we are unable to retry the start operation");
                            }
                            throw new ExecutionException("Unable to start " + vm + " due to error in finalizeStart, not retrying");
                        }
                    }
                    s_logger.info("Unable to start VM on " + dest.getHost() + " due to " + (startAnswer == null ? " no start answer" : startAnswer.getDetails()));

                } catch (OperationTimedoutException e) {
                    s_logger.debug("Unable to send the start command to host " + dest.getHost());
                    if (e.isActive()) {
                        _haMgr.scheduleStop(vm, destHostId, WorkType.CheckStop);
                    }
                    canRetry = false;
                    throw new AgentUnavailableException("Unable to start " + vm.getHostName(), destHostId, e);
                } catch (ResourceUnavailableException e) {
                    s_logger.info("Unable to contact resource.", e);
                    if (!avoids.add(e)) {
                        if (e.getScope() == Volume.class || e.getScope() == Nic.class) {
                            throw e;
                        } else {
                            s_logger.warn("unexpected ResourceUnavailableException : " + e.getScope().getName(), e);
                            throw e;
                        }
                    }
                } catch (InsufficientCapacityException e) {
                    s_logger.info("Insufficient capacity ", e);
                    if (!avoids.add(e)) {
                        if (e.getScope() == Volume.class || e.getScope() == Nic.class) {
                            throw e;
                        } else {
                            s_logger.warn("unexpected InsufficientCapacityException : " + e.getScope().getName(), e);
                        }
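The catch blocks above rely on another part of the ExcludeList API: add(exception) folds the scope reported by a ResourceUnavailableException or InsufficientCapacityException into the matching avoid set and, judging from the checks in this code, returns false when the scope is not something the list can express (a Volume or Nic, for instance), in which case retrying with the same avoid list would not help. A compressed sketch of that idiom, using only names that appear in the snippet:

    } catch (ResourceUnavailableException e) {
        s_logger.info("Unable to contact resource.", e);
        // Try to fold the failed scope (host, pod, cluster, pool, ...) into the avoid set.
        if (!avoids.add(e)) {
            // The scope could not be turned into an exclusion, so replanning with the
            // same avoid list would fail the same way: give up and rethrow.
            throw e;
        }
        // Otherwise fall through: the enclosing while (retry-- != 0) loop plans again
        // with the enlarged avoid list.
    }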

        }

        Host host = _hostDao.findById(hostId);

        DataCenterDeployment plan = new DataCenterDeployment(host.getDataCenterId(), host.getPodId(), host.getClusterId(), null, null, null);
        ExcludeList excludes = new ExcludeList();
        excludes.addHost(hostId);

        DeployDestination dest = null;
        while (true) {
            for (DeploymentPlanner planner : _planners) {
                if (planner.canHandle(profile, plan, excludes)) {
                    dest = planner.plan(profile, plan, excludes);
                } else {
                    continue;
                }

                if (dest != null) {
                    if (s_logger.isDebugEnabled()) {
                        s_logger.debug("Planner " + planner + " found " + dest + " for migrating to.");
                    }
                    break;
                }
                if (s_logger.isDebugEnabled()) {
                    s_logger.debug("Planner " + planner + " was unable to find anything.");
                }
            }

            if (dest == null) {
                throw new InsufficientServerCapacityException("Unable to find a server to migrate to.", host.getClusterId());
            }

            excludes.addHost(dest.getHost().getId());
            VMInstanceVO vmInstance = null;
            try {
                vmInstance = migrate(vm, srcHostId, dest);
            } catch (ResourceUnavailableException e) {
                s_logger.debug("Unable to migrate to unavailable " + dest);

    }

    @Test
    public void checkWhenDcInAvoidList() throws InsufficientServerCapacityException {
        DataCenterVO mockDc = mock(DataCenterVO.class);
        ExcludeList avoids = mock(ExcludeList.class);
        VirtualMachineProfileImpl vmProfile = mock(VirtualMachineProfileImpl.class);
        VMInstanceVO vm = mock(VMInstanceVO.class);
        DataCenterDeployment plan = mock(DataCenterDeployment.class);

        when(avoids.shouldAvoid(mockDc)).thenReturn(true);
        when(vmProfile.getVirtualMachine()).thenReturn(vm);
        when(vm.getDataCenterId()).thenReturn(1L);
        when(dcDao.findById(1L)).thenReturn(mockDc);

        List<Long> clusterList = planner.orderClusters(vmProfile, plan, avoids);

    @Test
    public void checkStrictModeWithCurrentAccountVmsPresent() throws InsufficientServerCapacityException {
        VirtualMachineProfileImpl vmProfile = mock(VirtualMachineProfileImpl.class);
        DataCenterDeployment plan = mock(DataCenterDeployment.class);
        ExcludeList avoids = new ExcludeList();

        initializeForTest(vmProfile, plan);

        initializeForImplicitPlannerTest(false);

        List<Long> clusterList = planner.orderClusters(vmProfile, plan, avoids);

        // Validations.
        // Check that clusters 2 and 3 are not in the cluster list.
        // Hosts 6 and 7 should also be in the avoid list.
        assertFalse("Cluster list should not be null/empty", (clusterList == null || clusterList.isEmpty()));
        boolean foundNeededCluster = false;
        for (Long cluster : clusterList) {
            if (cluster != 1) {
                fail("Found a cluster that shouldn't have been present, cluster id : " + cluster);
            }else {
                foundNeededCluster = true;
            }
        }
        assertTrue("Didn't find cluster 1 in the list. It should have been present", foundNeededCluster);

        Set<Long> hostsInAvoidList = avoids.getHostsToAvoid();
        assertFalse("Host 5 shouldn't have been in the avoid list, but it is present", hostsInAvoidList.contains(5L));
        Set<Long> hostsThatShouldBeInAvoidList = new HashSet<Long>();
        hostsThatShouldBeInAvoidList.add(6L);
        hostsThatShouldBeInAvoidList.add(7L);
        assertTrue("Hosts 6 and 7 that should have been present were not found in avoid list" ,

    @Test
    public void checkStrictModeHostWithCurrentAccountVmsFull() throws InsufficientServerCapacityException {
        @SuppressWarnings("unchecked")
        VirtualMachineProfileImpl vmProfile = mock(VirtualMachineProfileImpl.class);
        DataCenterDeployment plan = mock(DataCenterDeployment.class);
        ExcludeList avoids = new ExcludeList();

        initializeForTest(vmProfile, plan);

        initializeForImplicitPlannerTest(false);

        // Mark host 5, which has the current account's VMs, to be in the avoid list.
        avoids.addHost(5L);
        List<Long> clusterList = planner.orderClusters(vmProfile, plan, avoids);

        // Validations.
        // Check that clusters 1 and 3 are not in the cluster list.
        // Hosts 5 and 7 should also be in the avoid list.
        assertFalse("Cluster list should not be null/empty", (clusterList == null || clusterList.isEmpty()));
        boolean foundNeededCluster = false;
        for (Long cluster : clusterList) {
            if (cluster != 2) {
                fail("Found a cluster that shouldn't have been present, cluster id : " + cluster);
            } else {
                foundNeededCluster = true;
            }
        }
        assertTrue("Didn't find cluster 2 in the list. It should have been present", foundNeededCluster);

        Set<Long> hostsInAvoidList = avoids.getHostsToAvoid();
        assertFalse("Host 6 shouldn't have been in the avoid list, but it is present", hostsInAvoidList.contains(6L));
        Set<Long> hostsThatShouldBeInAvoidList = new HashSet<Long>();
        hostsThatShouldBeInAvoidList.add(5L);
        hostsThatShouldBeInAvoidList.add(7L);
        assertTrue("Hosts 5 and 7 that should have been present were not found in avoid list" ,
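The unit tests above show the two ways ExcludeList typically appears in tests: the data-center test mocks it and stubs shouldAvoid(...) to steer the planner, while the implicit-planner tests hand the planner a real, empty list and then inspect what it added through getHostsToAvoid(). A minimal version of the second style, with planner, vmProfile, plan and the host id standing in for the fixtures from the snippets:

    ExcludeList avoids = new ExcludeList();
    avoids.addHost(5L);                               // pre-seed one exclusion, as the second test does

    planner.orderClusters(vmProfile, plan, avoids);   // the planner may add further entries

    Set<Long> avoidedHosts = avoids.getHostsToAvoid();
    assertTrue("pre-seeded host should still be excluded", avoidedHosts.contains(5L));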
