Package com.sequenceiq.cloudbreak.domain

Examples of com.sequenceiq.cloudbreak.domain.Cluster
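
The snippets below touch the Cluster entity only through its accessors. For orientation, here is a minimal sketch of what such an entity could look like, reconstructed purely from the getters and setters used on this page; the field types, the JPA annotations and the Status and Blueprint collaborators are assumptions rather than the actual Cloudbreak source.

package com.sequenceiq.cloudbreak.domain;

import javax.persistence.Entity;
import javax.persistence.GeneratedValue;
import javax.persistence.GenerationType;
import javax.persistence.Id;
import javax.persistence.ManyToOne;

// Minimal sketch reconstructed from the accessors used in the examples below;
// Status and Blueprint are assumed to be neighbouring domain classes in this package.
@Entity
public class Cluster {

    @Id
    @GeneratedValue(strategy = GenerationType.AUTO)
    private Long id;

    private String owner;            // target of the e-mail and websocket notifications
    private String name;
    private String description;
    private Status status;           // REQUESTED, AVAILABLE, CREATE_FAILED, STOPPED, ...
    private String statusReason;     // human-readable detail accompanying the status
    private Boolean emailNeeded;     // send a success/failure mail when the build finishes
    private Long creationFinished;   // timestamp recorded on successful creation

    @ManyToOne
    private Blueprint blueprint;     // Ambari blueprint the cluster is built from

    public Long getId() { return id; }
    public String getOwner() { return owner; }
    public void setOwner(String owner) { this.owner = owner; }
    public String getName() { return name; }
    public void setName(String name) { this.name = name; }
    public String getDescription() { return description; }
    public void setDescription(String description) { this.description = description; }
    public Status getStatus() { return status; }
    public void setStatus(Status status) { this.status = status; }
    public String getStatusReason() { return statusReason; }
    public void setStatusReason(String statusReason) { this.statusReason = statusReason; }
    public Boolean getEmailNeeded() { return emailNeeded; }
    public void setEmailNeeded(Boolean emailNeeded) { this.emailNeeded = emailNeeded; }
    public Long getCreationFinished() { return creationFinished; }
    public void setCreationFinished(Long creationFinished) { this.creationFinished = creationFinished; }
    public Blueprint getBlueprint() { return blueprint; }
    public void setBlueprint(Blueprint blueprint) { this.blueprint = blueprint; }
}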


    @Autowired
    private JsonHelper jsonHelper;

    @Autowired
    private BlueprintRepository blueprintRepository; // referenced by convert() below

    public Cluster convert(ClusterRequest clusterRequest) {
        Cluster cluster = new Cluster();
        try {
            cluster.setBlueprint(blueprintRepository.findOne(clusterRequest.getBlueprintId()));
        } catch (AccessDeniedException e) {
            throw new AccessDeniedException(String.format("Access to blueprint '%s' is denied or blueprint doesn't exist.", clusterRequest.getBlueprintId()), e);
        }
        cluster.setName(clusterRequest.getName());
        cluster.setStatus(Status.REQUESTED);
        cluster.setDescription(clusterRequest.getDescription());
        cluster.setEmailNeeded(clusterRequest.getEmailNeeded());
        return cluster;
    }


    @Override
    public void accept(Event<ClusterCreationFailure> event) {
        ClusterCreationFailure clusterCreationFailure = event.getData();
        Long clusterId = clusterCreationFailure.getClusterId();
        Cluster cluster = clusterRepository.findById(clusterId);
        MDCBuilder.buildMdcContext(cluster);
        LOGGER.info("Accepted {} event.", ReactorConfig.CLUSTER_CREATE_FAILED_EVENT, clusterId);
        String detailedMessage = clusterCreationFailure.getDetailedMessage();
        cluster.setStatus(Status.CREATE_FAILED);
        cluster.setStatusReason(detailedMessage);
        clusterRepository.save(cluster);
        if (cluster.getEmailNeeded()) {
            ambariClusterInstallerMailSenderService.sendFailEmail(cluster.getOwner());
        }
        websocketService.sendToTopicUser(cluster.getOwner(), WebsocketEndPoint.CLUSTER,
                new StatusMessage(clusterId, cluster.getName(), Status.CREATE_FAILED.name(), detailedMessage));
    }

    private InstanceMetaDataRepository metadataRepository;

    @Override
    public void accept(Event<UpdateAmbariHostsSuccess> event) {
        UpdateAmbariHostsSuccess data = event.getData();
        Cluster cluster = clusterRepository.findById(data.getClusterId());
        Set<String> hostNames = data.getHostNames();
        MDCBuilder.buildMdcContext(cluster);
        LOGGER.info("Accepted {} event.", ReactorConfig.UPDATE_AMBARI_HOSTS_SUCCESS_EVENT);
        Stack stack = stackRepository.findStackWithListsForCluster(data.getClusterId());
        for (String hostName : hostNames) {
            InstanceMetaData metadataEntry = metadataRepository.findHostInStack(stack.getId(), hostName);
            // A decommission marks the host as removable; otherwise the flag is cleared.
            metadataEntry.setRemovable(data.isDecommision());
            metadataRepository.save(metadataEntry);
        }
        stackUpdater.updateStackStatus(stack.getId(), Status.AVAILABLE, "");
        websocketService.sendToTopicUser(cluster.getOwner(), WebsocketEndPoint.CLUSTER,
                new StatusMessage(data.getClusterId(), cluster.getName(), Status.AVAILABLE.name()));
    }

    public void accept(Event<ClusterStatusUpdateRequest> event) {
        ClusterStatusUpdateRequest statusUpdateRequest = event.getData();
        StatusRequest statusRequest = statusUpdateRequest.getStatusRequest();
        long stackId = statusUpdateRequest.getStackId();
        Stack stack = stackRepository.findOneWithLists(stackId);
        Cluster cluster = stack.getCluster();
        MDCBuilder.buildMdcContext(cluster);
        if (StatusRequest.STOPPED.equals(statusRequest)) {
            ambariClusterConnector.stopCluster(stack);
            cluster.setStatus(Status.STOPPED);
            if (Status.STOP_REQUESTED.equals(stackRepository.findOneWithLists(stackId).getStatus())) {
                LOGGER.info("Hadoop services stopped, stopping.");
                reactor.notify(ReactorConfig.STACK_STATUS_UPDATE_EVENT,
                        Event.wrap(new StackStatusUpdateRequest(stack.getTemplate().cloudPlatform(), stackId, statusRequest)));
            }
        } else {
            boolean started = ambariClusterConnector.startCluster(stack);
            if (started) {
                LOGGER.info("Successfully started Hadoop services, setting cluster state to: {}", Status.AVAILABLE);
                cluster.setStatus(Status.AVAILABLE);
            } else {
                LOGGER.info("Failed to start Hadoop services, setting cluster state to: {}", Status.STOPPED);
                cluster.setStatus(Status.STOPPED);
            }
        }
        clusterRepository.save(cluster);
    }

    @Override
    public void accept(Event<ClusterCreationSuccess> event) {
        ClusterCreationSuccess clusterCreationSuccess = event.getData();
        Long clusterId = clusterCreationSuccess.getClusterId();
        Cluster cluster = clusterRepository.findById(clusterId);
        MDCBuilder.buildMdcContext(cluster);
        LOGGER.info("Accepted {} event.", ReactorConfig.CLUSTER_CREATE_SUCCESS_EVENT, clusterId);
        cluster.setStatus(Status.AVAILABLE);
        cluster.setStatusReason("");
        cluster.setCreationFinished(clusterCreationSuccess.getCreationFinished());
        clusterRepository.save(cluster);
        Stack stack = stackRepository.findStackWithListsForCluster(clusterId);
        Set<InstanceMetaData> instances = stack.getInstanceMetaData();
        for (InstanceMetaData instanceMetaData : instances) {
            instanceMetaData.setRemovable(false);
        }
        stackUpdater.updateStackMetaData(stack.getId(), instances);
        stackUpdater.updateStackStatus(stack.getId(), Status.AVAILABLE);

        if (cluster.getEmailNeeded()) {
            ambariClusterInstallerMailSenderService.sendSuccessEmail(cluster.getOwner(), clusterCreationSuccess.getAmbariIp());
        }
        websocketService.sendToTopicUser(cluster.getOwner(), WebsocketEndPoint.CLUSTER,
                new StatusMessage(clusterId, cluster.getName(), Status.AVAILABLE.name()));
    }

    private RetryingStackUpdater stackUpdater;

    @Override
    public void accept(Event<UpdateAmbariHostsFailure> event) {
        UpdateAmbariHostsFailure data = event.getData();
        Cluster cluster = clusterRepository.findById(data.getClusterId());
        MDCBuilder.buildMdcContext(cluster);
        LOGGER.info("Accepted {} event.", ReactorConfig.UPDATE_AMBARI_HOSTS_FAILED_EVENT);
        cluster.setStatus(Status.AVAILABLE);
        cluster.setStatusReason(data.getDetailedMessage());
        clusterRepository.save(cluster);
        Stack stack = stackRepository.findStackForCluster(cluster.getId());
        stackUpdater.updateStackStatus(stack.getId(), Status.AVAILABLE);
        websocketService.sendToTopicUser(cluster.getOwner(), WebsocketEndPoint.CLUSTER, new StatusMessage(data.getClusterId(), cluster.getName(),
                "UPDATE_FAILED"));
    }


    @Override
    public void updateStatus(Long stackId, StatusRequest statusRequest) {
        Stack stack = stackRepository.findOne(stackId);
        Cluster cluster = stack.getCluster();
        MDCBuilder.buildMdcContext(cluster);
        long clusterId = cluster.getId();
        Status clusterStatus = cluster.getStatus();
        Status stackStatus = stack.getStatus();
        if (statusRequest.equals(StatusRequest.STARTED)) {
            if (Status.START_IN_PROGRESS.equals(stackStatus)) {
                LOGGER.info("Stack is starting, set cluster state to: {}", Status.START_REQUESTED);
                cluster.setStatus(Status.START_REQUESTED);
                clusterRepository.save(cluster);
            } else {
                if (!Status.STOPPED.equals(clusterStatus)) {
                    throw new BadRequestException(
                            String.format("Cannot update the status of cluster '%s' to STARTED, because it isn't in STOPPED state.", clusterId));
                }
                if (!Status.AVAILABLE.equals(stackStatus)) {
                    throw new BadRequestException(
                            String.format("Cannot update the status of cluster '%s' to STARTED, because the stack is not AVAILABLE", clusterId));
                }
                cluster.setStatus(Status.START_IN_PROGRESS);
                clusterRepository.save(cluster);
                LOGGER.info("Publishing {} event", ReactorConfig.CLUSTER_STATUS_UPDATE_EVENT);
                reactor.notify(ReactorConfig.CLUSTER_STATUS_UPDATE_EVENT,
                        Event.wrap(new ClusterStatusUpdateRequest(stack.getId(), statusRequest)));
            }
        } else {
            if (!Status.AVAILABLE.equals(clusterStatus)) {
                throw new BadRequestException(
                        String.format("Cannot update the status of cluster '%s' to STOPPED, because it isn't in AVAILABLE state.", clusterId));
            }
            if (!Status.AVAILABLE.equals(stackStatus) && !Status.STOP_REQUESTED.equals(stackStatus)) {
                throw new BadRequestException(
                        String.format("Cannot update the status of cluster '%s' to STARTED, because the stack is not AVAILABLE", clusterId));
            }
            cluster.setStatus(Status.STOP_IN_PROGRESS);
            clusterRepository.save(cluster);
            LOGGER.info("Publishing {} event", ReactorConfig.CLUSTER_STATUS_UPDATE_EVENT);
            reactor.notify(ReactorConfig.CLUSTER_STATUS_UPDATE_EVENT,
                    Event.wrap(new ClusterStatusUpdateRequest(stack.getId(), statusRequest)));
        }
    }

                for (InstanceMetaData metaData : instanceMetaData) {
                    if (metaData.getInstanceId().equals(instance.getInstanceId())) {
                        String publicDnsName = instance.getPublicDnsName();
                        if (metaData.getAmbariServer()) {
                            stack.setAmbariIp(publicDnsName);
                            Cluster cluster = clusterRepository.findOneWithLists(stack.getCluster().getId());
                            stack.setCluster(cluster);
                            stackRepository.save(stack);
                        }
                        metaData.setPublicIp(publicDnsName);
                        instanceMetaDataRepository.save(metaData);
                    }
                }

    private StackService stackService;

    @RequestMapping(value = "/stacks/{stackId}/cluster", method = RequestMethod.POST)
    @ResponseBody
    public ResponseEntity<String> create(@ModelAttribute("user") CbUser user, @PathVariable Long stackId, @RequestBody @Valid ClusterRequest clusterRequest) {
        Cluster cluster = clusterConverter.convert(clusterRequest);
        clusterService.create(user, stackId, cluster);
        return new ResponseEntity<>(HttpStatus.CREATED);
    }

    @RequestMapping(value = "/stacks/{stackId}/cluster", method = RequestMethod.GET)
    @ResponseBody
    public ResponseEntity<ClusterResponse> retrieveCluster(@PathVariable Long stackId) {
        Stack stack = stackService.get(stackId);
        Cluster cluster = clusterService.retrieveCluster(stackId);
        String clusterJson = clusterService.getClusterJson(stack.getAmbariIp(), stackId);
        ClusterResponse response = clusterConverter.convert(cluster, clusterJson);
        return new ResponseEntity<>(response, HttpStatus.OK);
    }
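
Taken together, the two mappings above describe the full request/response cycle for a cluster. The following is a rough sketch of how a client could drive them with Spring's RestTemplate; the base URL, the stack id, the omission of authentication (create() above also binds a CbUser) and the exact request-body field names are assumptions, inferred from the ClusterRequest accessors read by the converter in the first example.

import java.util.HashMap;
import java.util.Map;

import org.springframework.http.ResponseEntity;
import org.springframework.web.client.RestTemplate;

public class ClusterEndpointExample {

    public static void main(String[] args) {
        String baseUrl = "http://localhost:8080"; // assumed address of a running Cloudbreak instance
        Long stackId = 1L;                        // assumed id of an already created stack
        RestTemplate restTemplate = new RestTemplate();

        // Request body mirroring the ClusterRequest fields used by ClusterConverter.convert();
        // serialised as JSON by the default converters (assumes Jackson on the classpath).
        Map<String, Object> clusterRequest = new HashMap<>();
        clusterRequest.put("name", "my-cluster");
        clusterRequest.put("description", "example cluster");
        clusterRequest.put("blueprintId", 1L);
        clusterRequest.put("emailNeeded", false);

        // POST /stacks/{stackId}/cluster -> 201 CREATED (handled by create() above)
        ResponseEntity<String> created = restTemplate.postForEntity(
                baseUrl + "/stacks/{stackId}/cluster", clusterRequest, String.class, stackId);
        System.out.println("create returned: " + created.getStatusCode());

        // GET /stacks/{stackId}/cluster -> ClusterResponse as JSON (handled by retrieveCluster() above)
        ResponseEntity<String> cluster = restTemplate.getForEntity(
                baseUrl + "/stacks/{stackId}/cluster", String.class, stackId);
        System.out.println("cluster: " + cluster.getBody());
    }
}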
