Package com.cloud.deploy

Examples of com.cloud.deploy.DeployDestination
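DeployDestination bundles the placement chosen for a virtual machine: the target zone (DataCenter), pod, cluster, and host, optionally together with a Map<Volume, StoragePool> that pins each of the VM's volumes to a storage pool. In the excerpts below it is either built directly from DAO lookups or returned by the deployment planning manager (_dpMgr.planDeployment(...)), and it is then handed to the network, volume, and hypervisor layers that prepare and start the VM.

A minimal sketch of the common construction pattern, pieced together from the excerpts below (hostId and the injected DAO fields are assumptions for illustration, not taken from any single example):

    // Resolve the placement hierarchy for a known host (hostId is a hypothetical input).
    HostVO host = _hostDao.findById(hostId);
    DataCenterVO dc = _dcDao.findById(host.getDataCenterId());
    HostPodVO pod = _podDao.findById(host.getPodId());
    Cluster cluster = _clusterDao.findById(host.getClusterId());

    // Zone/pod/cluster/host narrowing; a five-argument variant additionally takes a
    // Map<Volume, StoragePool> when per-volume storage placement is already decided.
    DeployDestination dest = new DeployDestination(dc, pod, cluster, host);
    s_logger.debug("Deploying to host " + dest.getHost().getId() + " in pod " + dest.getPod().getId());

The first excerpt is a unit test, testScaleVM1, that builds a host-only destination, stubs the VM instance lookup, and expects migrateForScale to fail with a CloudRuntimeException.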


    }

    @Test(expected = CloudRuntimeException.class)
    public void testScaleVM1() throws Exception {

        DeployDestination dest = new DeployDestination(null, null, null, _host);
        long l = 1L;

        when(_vmInstanceDao.findById(anyLong())).thenReturn(_vmInstance);
        _vmMgr.migrateForScale(_vmInstance.getUuid(), l, dest, l);
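
The second test, testScaleVM2, also constructs a host-only destination, this time stubbing the VM's id and host id, the instance lookup, and the host DAO before the scale operation, and again expects a CloudRuntimeException.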


    }

    @Test(expected = CloudRuntimeException.class)
    public void testScaleVM2() throws Exception {

        new DeployDestination(null, null, null, _host);
        doReturn(3L).when(_vmInstance).getId();
        when(_vmInstanceDao.findById(anyLong())).thenReturn(_vmInstance);
        ServiceOfferingVO newServiceOffering = getSvcoffering(512);
        doReturn(1L).when(_vmInstance).getHostId();
        doReturn(hostVO).when(_hostDao).findById(1L);
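
This excerpt builds the volume-to-storage-pool map from UUID pairs and passes it to the five-argument constructor, so each volume's storage placement travels with the destination.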

            for (Map.Entry<String, String> entry : storage.entrySet()) {
                vols.put(s_entityMgr.findByUuid(Volume.class, entry.getKey()), s_entityMgr.findByUuid(StoragePool.class, entry.getValue()));
            }
        }

        DeployDestination dest = new DeployDestination(zone, pod, cluster, host, vols);
        return dest;
    }
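
During a network restart only the zone is known, so the destination is created with a null pod, cluster, and host before the network's elements and rules are implemented again.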

        } else {
            s_logger.debug("Skip the shutting down of network id=" + networkId);
        }

        // implement the network elements and rules again
        DeployDestination dest = new DeployDestination(_dcDao.findById(network.getDataCenterId()), null, null, null);

        s_logger.debug("Implementing the network " + network + " elements and resources as a part of network restart");
        NetworkOfferingVO offering = _networkOfferingDao.findById(network.getNetworkOfferingId());

        try {
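
A persistent network offering is handled similarly: once the transaction commits, the network is implemented against a zone-only destination under a freshly created ReservationContext.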

        txn.commit();

        // if the network offering has persistent set to true, implement the network
        if (createNetwork && requiredOfferings.get(0).getIsPersistent()) {
            DataCenter zone = _dcDao.findById(zoneId);
            DeployDestination dest = new DeployDestination(zone, null, null, null);
            Account callerAccount = UserContext.current().getCaller();
            UserVO callerUser = _userDao.findById(UserContext.current().getCallerUserId());
            Journal journal = new Journal.LogJournal("Implementing " + guestNetwork, s_logger);
            ReservationContext context = new ReservationContextImpl(UUID.randomUUID().toString(), journal, callerUser, callerAccount);
            s_logger.debug("Implementing network " + guestNetwork + " as a part of network provision for persistent network");
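
When preparing a nic for a VM on a network, the destination is built from the network's zone and the VM's current host; the nic is then allocated if it does not exist yet, and always for a user VM.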

            ConcurrentOperationException, InsufficientCapacityException, ResourceUnavailableException {
               
                VirtualMachine vm = vmProfile.getVirtualMachine();
                DataCenter dc = _configMgr.getZone(network.getDataCenterId());
                Host host = _hostDao.findById(vm.getHostId());
                DeployDestination dest = new DeployDestination(dc, null, null, host);
               
                NicProfile nic = getNicProfileForVm(network, requested, vm);
               
                // 1) Allocate the nic if needed; always allocate if it is a user vm
                if (nic == null || (vmProfile.getType() == VirtualMachine.Type.User)) {
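
The reservation loop below repeatedly asks the deployment planner for a destination and tries to finalize a reservation against it, adding the destination host to the exclude list and retrying whenever finalization fails.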

            }

        }

        while (true) {
            DeployDestination dest = null;
            try {
                dest = _dpMgr.planDeployment(vmProfile, plan, exclude, plannerToUse);
            } catch (AffinityConflictException e) {
                throw new CloudRuntimeException("Unable to create deployment, affinity rules associated with the VM conflict");
            }

            if (dest != null) {
                String reservationId = _dpMgr.finalizeReservation(dest, vmProfile, plan, exclude, plannerToUse);
                if (reservationId != null) {
                    return reservationId;
                } else {
                    if (s_logger.isDebugEnabled()) {
                        s_logger.debug("Cannot finalize the VM reservation for this destination found, retrying");
                    }
                    exclude.addHost(dest.getHost().getId());
                    continue;
                }
            } else if (planChangedByReadyVolume) {
                // we could not reserve in the Volume's cluster - let the deploy
                // call retry it.
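
The longest excerpt comes from the VM start path: a destination is planned, the cluster's overcommit ratios are recorded on the profile, network and storage are prepared, a StartCommand is sent to the destination host, and the outcome of finalizeStart decides whether the start completes or the VM is stopped and the operation retried.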

                    }
                }

                Account owner = _entityMgr.findById(Account.class, vm.getAccountId());
                VirtualMachineProfileImpl vmProfile = new VirtualMachineProfileImpl(vm, template, offering, owner, params);
                DeployDestination dest = null;
                try {
                    dest = _dpMgr.planDeployment(vmProfile, plan, avoids, planner);
                } catch (AffinityConflictException e2) {
                    s_logger.warn("Unable to create deployment, affinity rules associted to the VM conflict", e2);
                    throw new CloudRuntimeException("Unable to create deployment, affinity rules associted to the VM conflict");

                }

                if (dest == null) {
                    if (planChangedByVolume) {
                        plan = originalPlan;
                        planChangedByVolume = false;
                        //do not enter volume reuse for the next retry, since we want to look for resources outside the volume's cluster
                        reuseVolume = false;
                        continue;
                    }
                    throw new InsufficientServerCapacityException("Unable to create a deployment for " + vmProfile, DataCenter.class, plan.getDataCenterId(),
                            areAffinityGroupsAssociated(vmProfile));
                }

                if (dest != null) {
                    avoids.addHost(dest.getHost().getId());
                    journal.record("Deployment found ", vmProfile, dest);
                }

                long destHostId = dest.getHost().getId();
                vm.setPodIdToDeployIn(dest.getPod().getId());
                Long cluster_id = dest.getCluster().getId();
                ClusterDetailsVO cluster_detail_cpu = _clusterDetailsDao.findDetail(cluster_id, "cpuOvercommitRatio");
                ClusterDetailsVO cluster_detail_ram = _clusterDetailsDao.findDetail(cluster_id, "memoryOvercommitRatio");
                //storing the value of overcommit in the vm_details table for doing a capacity check in case the cluster overcommit ratio is changed.
                if (_uservmDetailsDao.findDetail(vm.getId(), "cpuOvercommitRatio") == null &&
                        ((Float.parseFloat(cluster_detail_cpu.getValue()) > 1f || Float.parseFloat(cluster_detail_ram.getValue()) > 1f))) {
                    _uservmDetailsDao.addDetail(vm.getId(), "cpuOvercommitRatio", cluster_detail_cpu.getValue(), true);
                    _uservmDetailsDao.addDetail(vm.getId(), "memoryOvercommitRatio", cluster_detail_ram.getValue(), true);
                } else if (_uservmDetailsDao.findDetail(vm.getId(), "cpuOvercommitRatio") != null) {
                    _uservmDetailsDao.addDetail(vm.getId(), "cpuOvercommitRatio", cluster_detail_cpu.getValue(), true);
                    _uservmDetailsDao.addDetail(vm.getId(), "memoryOvercommitRatio", cluster_detail_ram.getValue(), true);
                }
                vmProfile.setCpuOvercommitRatio(Float.parseFloat(cluster_detail_cpu.getValue()));
                vmProfile.setMemoryOvercommitRatio(Float.parseFloat(cluster_detail_ram.getValue()));
                StartAnswer startAnswer = null;

                try {
                    if (!changeState(vm, Event.OperationRetry, destHostId, work, Step.Prepare)) {
                        throw new ConcurrentOperationException("Unable to update the state of the Virtual Machine");
                    }
                } catch (NoTransitionException e1) {
                    throw new ConcurrentOperationException(e1.getMessage());
                }

                try {
                    if (s_logger.isDebugEnabled()) {
                        s_logger.debug("VM is being created in podId: " + vm.getPodIdToDeployIn());
                    }
                    _networkMgr.prepare(vmProfile, dest, ctx);
                    if (vm.getHypervisorType() != HypervisorType.BareMetal) {
                        volumeMgr.prepare(vmProfile, dest);
                    }
                    //since StorageMgr succeeded in volume creation, reuse Volume for further tries until current cluster has capacity
                    if (!reuseVolume) {
                        reuseVolume = true;
                    }

                    Commands cmds = null;
                    vmGuru.finalizeVirtualMachineProfile(vmProfile, dest, ctx);

                    VirtualMachineTO vmTO = hvGuru.implement(vmProfile);

                    handlePath(vmTO.getDisks(), vm.getHypervisorType());

                    cmds = new Commands(Command.OnError.Stop);

                    cmds.addCommand(new StartCommand(vmTO, dest.getHost(), getExecuteInSequence(vm.getHypervisorType())));


                    vmGuru.finalizeDeployment(cmds, vmProfile, dest, ctx);

                    work = _workDao.findById(work.getId());
                    if (work == null || work.getStep() != Step.Prepare) {
                        throw new ConcurrentOperationException("Work steps have been changed: " + work);
                    }

                    _workDao.updateStep(work, Step.Starting);

                    _agentMgr.send(destHostId, cmds);

                    _workDao.updateStep(work, Step.Started);

                    startAnswer = cmds.getAnswer(StartAnswer.class);
                    if (startAnswer != null && startAnswer.getResult()) {
                        handlePath(vmTO.getDisks(), startAnswer.getIqnToPath());
                        String host_guid = startAnswer.getHost_guid();
                        if (host_guid != null) {
                            HostVO finalHost = _resourceMgr.findHostByGuid(host_guid);
                            if (finalHost == null) {
                                throw new CloudRuntimeException("Host Guid " + host_guid + " doesn't exist in DB, something wrong here");
                            }
                            destHostId = finalHost.getId();
                        }
                        if (vmGuru.finalizeStart(vmProfile, destHostId, cmds, ctx)) {
                            syncDiskChainChange(startAnswer);

                            if (!changeState(vm, Event.OperationSucceeded, destHostId, work, Step.Done)) {
                                throw new ConcurrentOperationException("Unable to transition to a new state.");
                            }

                            // Update GPU device capacity
                            GPUDeviceTO gpuDevice = startAnswer.getVirtualMachine().getGpuDevice();
                            if (gpuDevice != null) {
                                _resourceMgr.updateGPUDetails(destHostId, gpuDevice.getGroupDetails());
                            }

                            startedVm = vm;
                            if (s_logger.isDebugEnabled()) {
                                s_logger.debug("Start completed for VM " + vm);
                            }
                            return;
                        } else {
                            if (s_logger.isInfoEnabled()) {
                                s_logger.info("The guru did not like the answers so stopping " + vm);
                            }

                            StopCommand cmd = new StopCommand(vm, getExecuteInSequence(vm.getHypervisorType()), false);
                            StopAnswer answer = (StopAnswer) _agentMgr.easySend(destHostId, cmd);
                            if (answer != null) {

                                if (vm.getType() == VirtualMachine.Type.User) {
                                    String platform = answer.getPlatform();
                                    if (platform != null) {
                                        Map<String,String> vmmetadata = new HashMap<String,String>();
                                        vmmetadata.put(vm.getInstanceName(), platform);
                                        syncVMMetaData(vmmetadata);
                                    }
                                }
                            }

                            if (answer == null || !answer.getResult()) {
                                s_logger.warn("Unable to stop " + vm + " due to " + (answer != null ? answer.getDetails() : "no answers"));
                                _haMgr.scheduleStop(vm, destHostId, WorkType.ForceStop);
                                throw new ExecutionException("Unable to stop " + vm + " so we are unable to retry the start operation");
                            }
                            throw new ExecutionException("Unable to start " + vm + " due to error in finalizeStart, not retrying");
                        }
                    }
                    s_logger.info("Unable to start VM on " + dest.getHost() + " due to " + (startAnswer == null ? " no start answer" : startAnswer.getDetails()));

                } catch (OperationTimedoutException e) {
                    s_logger.debug("Unable to send the start command to host " + dest.getHost());
                    if (e.isActive()) {
                        _haMgr.scheduleStop(vm, destHostId, WorkType.CheckStop);
                    }
                    canRetry = false;
                    throw new AgentUnavailableException("Unable to start " + vm.getHostName(), destHostId, e);
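
For a migration to a chosen host, the destination is built directly from the target host's zone, pod, and cluster, and a map of which volume should go in which storage pool is prepared alongside it.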

        VirtualMachineGuru vmGuru = getVmGuru(vm);

        DataCenterVO dc = _dcDao.findById(destHost.getDataCenterId());
        HostPodVO pod = _podDao.findById(destHost.getPodId());
        Cluster cluster = _clusterDao.findById(destHost.getClusterId());
        DeployDestination destination = new DeployDestination(dc, pod, cluster, destHost);

        // Create a map of which volume should go in which storage pool.
        VirtualMachineProfile profile = new VirtualMachineProfileImpl(vm);
        Map<Volume, StoragePool> volumeToPoolMap = getPoolListForVolumesForMigration(profile, destHost, volumeToPool);
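
Finally, when migrating a VM away from a host, the deployment plan is constrained to that host's cluster with the source host excluded; each destination that fails to accept the migration is added to the exclude list before the next attempt.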

        DataCenterDeployment plan = new DataCenterDeployment(host.getDataCenterId(), host.getPodId(), host.getClusterId(), null, poolId, null);
        ExcludeList excludes = new ExcludeList();
        excludes.addHost(hostId);

        DeployDestination dest = null;
        while (true) {

            try {
                dest = _dpMgr.planDeployment(profile, plan, excludes, planner);
            } catch (AffinityConflictException e2) {
                s_logger.warn("Unable to create deployment, affinity rules associted to the VM conflict", e2);
                throw new CloudRuntimeException("Unable to create deployment, affinity rules associted to the VM conflict");
            }

            if (dest != null) {
                if (s_logger.isDebugEnabled()) {
                    s_logger.debug("Found destination " + dest + " for migrating to.");
                }
            } else {
                if (s_logger.isDebugEnabled()) {
                    s_logger.debug("Unable to find destination for migrating the vm " + profile);
                }
                throw new InsufficientServerCapacityException("Unable to find a server to migrate to.", host.getClusterId());
            }

            excludes.addHost(dest.getHost().getId());
            try {
                migrate(vm, srcHostId, dest);
                return;
            } catch (ResourceUnavailableException e) {
                s_logger.debug("Unable to migrate to unavailable " + dest);
