Package: com.sequenceiq.cloudbreak.domain

Usage examples of the com.sequenceiq.cloudbreak.domain.Cluster class


    }

    // Builds a minimal Azure-backed Stack test fixture with an attached Cluster.
    // NOTE(review): the method body is truncated in this listing — the return
    // statement and closing brace are not visible here.
    private Stack createStack() {
        Stack stack = new Stack();
        stack.setId(1L);
        Cluster cluster = new Cluster();
        // disable e-mail notification so the fixture never triggers mail sending
        cluster.setEmailNeeded(false);
        stack.setCluster(cluster);
        stack.setName(STACK_NAME);
        stack.setCredential(new AzureCredential());
        AzureTemplate azureTemplate = new AzureTemplate();
        stack.setTemplate(azureTemplate);
View Full Code Here


    // Injected collaborator; presumably supplies Hadoop-specific configuration
    // for cluster provisioning — not used in the fragments visible here.
    @Autowired
    private HadoopConfigurationService hadoopConfigurationService;

    /**
     * Installs an Ambari-managed cluster on the given stack: posts the
     * blueprint to the Ambari server, obtains a host-group-to-host mapping,
     * triggers cluster creation, and blocks until the install completes,
     * recording success or failure on the cluster entity.
     * NOTE(review): the catch block is truncated in this listing; any handling
     * after clusterCreateFailed(...) is not visible.
     *
     * @param stack the stack whose Ambari server (stack.getAmbariIp()) will
     *              host the new cluster; its Cluster must carry a Blueprint
     */
    public void installAmbariCluster(Stack stack) {
        Cluster cluster = stack.getCluster();
        MDCBuilder.buildMdcContext(cluster);
        try {
            LOGGER.info("Starting Ambari cluster installation [Ambari server address: {}]", stack.getAmbariIp());
            stackUpdater.updateStackStatus(stack.getId(), Status.UPDATE_IN_PROGRESS);
            // record when creation started so duration can be reported later
            cluster.setCreationStarted(new Date().getTime());
            cluster = clusterRepository.save(cluster);
            Blueprint blueprint = cluster.getBlueprint();
            AmbariClient ambariClient = createAmbariClient(stack.getAmbariIp());

            addBlueprint(stack, ambariClient, blueprint);
            // ask Ambari for a recommended host-group -> host-names layout
            Map<String, List<String>> hostGroupMappings = recommend(stack, ambariClient, blueprint.getBlueprintName());
            saveHostMetadata(cluster, hostGroupMappings);
            ambariClient.createCluster(cluster.getName(), blueprint.getBlueprintName(), hostGroupMappings);
            // blocks until Ambari reports the installation finished (or failed)
            waitForClusterInstall(stack, ambariClient);
            clusterCreateSuccess(cluster, new Date().getTime(), stack.getAmbariIp());
        } catch (AmbariHostsUnavailableException | AmbariOperationFailedException | InvalidHostGroupHostAssociation e) {
            LOGGER.error(e.getMessage(), e);
            clusterCreateFailed(cluster, e.getMessage());
View Full Code Here

        }
    }

    /**
     * Adds new nodes to an existing Ambari cluster (upscale). Loads the stack
     * and its cluster with their associated collections, marks the stack as
     * updating, then waits for the new hosts to register with Ambari.
     * NOTE(review): the remainder of the try block and the catch clause are
     * truncated in this listing.
     *
     * @param stackId              id of the stack being upscaled
     * @param hostGroupAdjustments per-host-group node-count adjustments
     */
    public void installAmbariNode(Long stackId, Set<HostGroupAdjustmentJson> hostGroupAdjustments) {
        Stack stack = stackRepository.findOneWithLists(stackId);
        Cluster cluster = clusterRepository.findOneWithLists(stack.getCluster().getId());
        MDCBuilder.buildMdcContext(cluster);
        try {
            stackUpdater.updateStackStatus(stack.getId(), Status.UPDATE_IN_PROGRESS);
            AmbariClient ambariClient = createAmbariClient(stack.getAmbariIp());
            waitForHosts(stack, ambariClient);
View Full Code Here

        }
    }

    /**
     * Removes (downscales) hosts from an existing Ambari cluster. For each
     * requested adjustment it selects hosts from the given host group —
     * skipping the host running the Ambari server — then per host:
     * decommissions NODEMANAGER/DATANODE roles where present, stops and
     * deletes all components, deletes the host from Ambari, and restarts
     * ZOOKEEPER (and NAGIOS, when installed) to pick up the new topology.
     * Finally the removed hosts' metadata is detached from the persisted
     * cluster.
     * NOTE(review): the tail of this method (after collecting hostsRemoved)
     * is truncated in this listing.
     *
     * @param stackId id of the stack whose cluster is being downscaled
     * @param hosts   adjustments with negative scalingAdjustment values, so
     *                -1 * scalingAdjustment hosts are removed per group
     */
    public void decommisionAmbariNodes(Long stackId, Set<HostGroupAdjustmentJson> hosts) {
        Stack stack = stackRepository.findOneWithLists(stackId);
        Cluster cluster = stack.getCluster();
        MDCBuilder.buildMdcContext(cluster);
        LOGGER.info("Decommision requested");
        try {
            stackUpdater.updateStackStatus(stack.getId(), Status.UPDATE_IN_PROGRESS);
            AmbariClient ambariClient = createAmbariClient(stack.getAmbariIp());
            Set<HostMetadata> metadataToRemove = new HashSet<>();
            for (HostGroupAdjustmentJson hostGroupAdjustment : hosts) {
                // scalingAdjustment is negative for a downscale; negate for the count
                LOGGER.info("Decommisioning {} hosts", -1 * hostGroupAdjustment.getScalingAdjustment());
                Set<HostMetadata> hostsInHostGroup = hostMetadataRepository.findHostsInHostgroup(hostGroupAdjustment.getHostGroup(), cluster.getId());
                int i = 0;
                for (HostMetadata hostMetadata : hostsInHostGroup) {
                    String hostName = hostMetadata.getHostName();
                    InstanceMetaData instanceMetaData = instanceMetadataRepository.findHostInStack(stack.getId(), hostName);
                    // never remove the host running the Ambari server itself;
                    // note i is only incremented for non-server candidates
                    if (!instanceMetaData.getAmbariServer()) {
                        if (i < -1 * hostGroupAdjustment.getScalingAdjustment()) {
                            LOGGER.info("Host '{}' will be removed from Ambari cluster", hostName);
                            metadataToRemove.add(hostMetadata);
                            Set<String> components = ambariClient.getHostComponentsMap(hostName).keySet();
                            Map<String, Integer> installRequests = new HashMap<>();
                            // graceful decommission for data-bearing roles before stopping them
                            // (request-key spelling "DECOMMISION" is an existing runtime key)
                            if (components.contains("NODEMANAGER")) {
                                Integer requestId = ambariClient.decommissionNodeManager(hostName);
                                installRequests.put("NODEMANAGER_DECOMMISION", requestId);
                            }
                            if (components.contains("DATANODE")) {
                                Integer requestId = ambariClient.decommissionDataNode(hostName);
                                installRequests.put("DATANODE_DECOMMISION", requestId);
                            }
                            List<String> componentsList = new ArrayList<>();
                            componentsList.addAll(components);
                            Map<String, Integer> stopRequests = ambariClient.stopComponentsOnHost(hostName, componentsList);
                            installRequests.putAll(stopRequests);
                            // wait for decommission + stop requests to finish before deleting
                            waitForAmbariOperations(stack, ambariClient, installRequests);
                            ambariClient.deleteHostComponents(hostName, componentsList);
                            ambariClient.deleteHost(hostName);

                            // restart coordination services so they drop the removed host
                            installRequests = new HashMap<>();
                            Integer zookeeperRequestId = ambariClient.restartServiceComponents("ZOOKEEPER", Arrays.asList("ZOOKEEPER_SERVER"));
                            installRequests.put("ZOOKEEPER", zookeeperRequestId);
                            if (ambariClient.getServiceComponentsMap().containsKey("NAGIOS")) {
                                Integer nagiosRequestId = ambariClient.restartServiceComponents("NAGIOS", Arrays.asList("NAGIOS_SERVER"));
                                installRequests.put("NAGIOS", nagiosRequestId);
                            }
                            waitForAmbariOperations(stack, ambariClient, installRequests);
                        } else {
                            // quota for this host group reached; stop scanning its hosts
                            break;
                        }
                        i++;
                    }
                }
            }
            // re-load with collections so the host-metadata set is initialized,
            // then persist the cluster without the removed hosts
            cluster = clusterRepository.findOneWithLists(stack.getCluster().getId());
            cluster.getHostMetadata().removeAll(metadataToRemove);
            clusterRepository.save(cluster);
            Set<String> hostsRemoved = new HashSet<>();
            for (HostMetadata hostMetadata : metadataToRemove) {
                hostsRemoved.add(hostMetadata.getHostName());
            }
View Full Code Here

                }
            }
            if (started) {
                cloudbreakEventService.fireCloudbreakEvent(stackId, BillingStatus.BILLING_STARTED.name(), "Stack started.");
                waitForAmbariToStart(stack);
                Cluster cluster = clusterRepository.findOneWithLists(stack.getCluster().getId());
                LOGGER.info("Update stack state to: {}", Status.AVAILABLE);
                stackUpdater.updateStackStatus(stackId, Status.AVAILABLE);
                if (cluster != null && Status.START_REQUESTED.equals(cluster.getStatus())) {
                    boolean hostsJoined = waitForHostsToJoin(stack);
                    if (hostsJoined) {
                        reactor.notify(ReactorConfig.CLUSTER_STATUS_UPDATE_EVENT,
                                Event.wrap(new ClusterStatusUpdateRequest(stack.getId(), statusRequest)));
                    } else {
                        cluster.setStatus(Status.START_FAILED);
                        stack.setCluster(cluster);
                        stackRepository.save(stack);
                    }
                }
            } else {
View Full Code Here

TOP

Related Classes of com.sequenceiq.cloudbreak.domain.Cluster

Copyright © 2018 www.massapi.com. All rights reserved.
All source code is the property of its respective owners. Java is a trademark of Sun Microsystems, Inc., owned by Oracle Inc. Contact coftware#gmail.com.