Package org.apache.whirr

Examples of org.apache.whirr.ClusterController
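ClusterController is Apache Whirr's entry point for provisioning and tearing down clusters. Before the indexed examples below, here is a minimal, self-contained sketch of the usual launch/destroy lifecycle. It is not taken from any of the sources on this page, and the properties file name "whirr-hadoop.properties" is only a placeholder.

import org.apache.commons.configuration.CompositeConfiguration;
import org.apache.commons.configuration.PropertiesConfiguration;
import org.apache.whirr.Cluster;
import org.apache.whirr.ClusterController;
import org.apache.whirr.ClusterSpec;

public class LaunchAndDestroyExample {
  public static void main(String[] args) throws Exception {
    // Build the cluster specification from a properties file; the file name
    // below is a placeholder, not taken from the examples on this page.
    CompositeConfiguration config = new CompositeConfiguration();
    config.addConfiguration(new PropertiesConfiguration("whirr-hadoop.properties"));
    ClusterSpec clusterSpec = ClusterSpec.withTemporaryKeys(config);

    ClusterController controller = new ClusterController();
    try {
      // Provision the nodes described by the spec and wait for them to start.
      Cluster cluster = controller.launchCluster(clusterSpec);
      System.out.printf("Started cluster of %d instances%n",
          cluster.getInstances().size());
    } finally {
      // Release the cloud resources even if the launch fails part-way through.
      controller.destroyCluster(clusterSpec);
    }
  }
}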


    private File launchHadoopCluster(HostDescription hostDescription, File workingDirectory)
            throws IOException, GFacHandlerException, ConfigurationException, InterruptedException, TransformerException, ParserConfigurationException {
        ClusterSpec hadoopClusterSpec =
                whirrConfigurationToClusterSpec(hostDescription, workingDirectory);
        ClusterController hadoopClusterController =
                createClusterController(hadoopClusterSpec.getServiceName());
        Cluster hadoopCluster =  hadoopClusterController.launchCluster(hadoopClusterSpec);

        logger.info(String.format("Started cluster of %s instances.\n",
                hadoopCluster.getInstances().size()));

        File siteXML = new File(workingDirectory, "hadoop-site.xml");


        return siteXML;
    }

    private ClusterController createClusterController(String serviceName){
        ClusterControllerFactory factory = new ClusterControllerFactory();
        ClusterController controller = factory.create(serviceName);

        if (controller == null) {
            logger.warn("Unable to find the service {}, using default.", serviceName);
            controller = factory.create(null);
        }

        return controller;
    }

    if (System.getProperty("config") != null) {
      config.addConfiguration(new PropertiesConfiguration(System.getProperty("config")));
    }
    config.addConfiguration(new PropertiesConfiguration(this.configResource));
    clusterSpec = ClusterSpec.withTemporaryKeys(config);
    controller = new ClusterController();

    cluster = controller.launchCluster(clusterSpec);
    proxy = new HadoopProxy(clusterSpec, cluster);
    proxy.start();
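The setup above launches the cluster and opens a HadoopProxy; the matching teardown is not shown in this extract, but would look roughly like the following sketch, assuming the same controller, clusterSpec and proxy fields:

  @After
  public void tearDown() throws IOException, InterruptedException {
    if (proxy != null) {
      proxy.stop();                            // close the SSH proxy opened above
    }
    controller.destroyCluster(clusterSpec);    // shut down all cloud instances
  }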

        newInstanceTemplate("noop"),
        newInstanceTemplate("noop", "noop1", "noop2"),
        newInstanceTemplate("noop1", "noop3")
    ));

    ClusterController controller = new ClusterController();
    cluster = controller.launchCluster(clusterSpec);
  }

    tempSpec.setClusterName("test-cluster-for-script-execution");
    tempSpec.setProvider("stub");
    tempSpec.setIdentity("dummy");
    tempSpec.setStateStore("none");

    ClusterController controller = new ClusterController();
    Cluster tempCluster = controller.launchCluster(tempSpec);

   
    action.execute(tempSpec, tempCluster);
   
    List<StatementOnNode> executions = dryRun.getTotallyOrderedExecutions();


  public int run(InputStream in, PrintStream out, PrintStream err, ClusterSpec clusterSpec) throws Exception {
    ClusterController controller = createClusterController(clusterSpec.getServiceName());
    Cluster cluster = controller.launchCluster(clusterSpec);
    out.printf("Started cluster of %s instances\n",
      cluster.getInstances().size());
    out.println(cluster);

    Utils.printSSHConnectionDetails(out, clusterSpec, cluster, 20);


  @Test
  public void testAllOptions() throws Exception {

    ClusterControllerFactory factory = mock(ClusterControllerFactory.class);
    ClusterController controller = mock(ClusterController.class);
    when(factory.create((String) any())).thenReturn(controller);

    NodeMetadata node1 = new NodeMetadataBuilder().name("name1").ids("id1")
        .location(new LocationBuilder().scope(LocationScope.PROVIDER)
          .id("location-id1").description("location-desc1").build())
        .imageId("image-id").state(NodeState.RUNNING)
        .publicAddresses(Lists.newArrayList("127.0.0.1"))
        .privateAddresses(Lists.newArrayList("127.0.0.1")).build();

    NodeMetadata node2 = new NodeMetadataBuilder().name("name2").ids("id2")
        .location(new LocationBuilder().scope(LocationScope.PROVIDER)
          .id("location-id2").description("location-desc2").build())
        .imageId("image-id").state(NodeState.RUNNING)
        .publicAddresses(Lists.newArrayList("127.0.0.2"))
        .privateAddresses(Lists.newArrayList("127.0.0.2")).build();

    when(controller.getNodes((ClusterSpec) any())).thenReturn(
        (Set) Sets.newLinkedHashSet(Lists.newArrayList(node1, node2)));
    when(controller.getInstances((ClusterSpec)any(), (ClusterStateStore)any()))
        .thenCallRealMethod();

    ClusterStateStore memStore = new MemoryClusterStateStore();
    memStore.save(createTestCluster(
      new String[]{"id1", "id2"}, new String[]{"role1", "role2"}));
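The test above verifies the command through mocks; against a real or stub provider the same lookup can be made directly on the controller. A rough sketch, assuming a clusterSpec built as in the earlier examples:

    ClusterController controller = new ClusterController();
    // Read back the instances of an already-launched cluster and print each
    // instance id together with the roles assigned to it.
    for (Cluster.Instance instance : controller.getInstances(clusterSpec)) {
      System.out.println(instance.getId() + ": " + instance.getRoles());
    }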

 
  @Test
  public void testAllOptions() throws Exception {
   
    ClusterControllerFactory factory = mock(ClusterControllerFactory.class);
    ClusterController controller = mock(ClusterController.class);
    when(factory.create((String) any())).thenReturn(controller);
   
    DestroyClusterCommand command = new DestroyClusterCommand(factory);
    Map<String, File> keys = KeyPair.generateTemporaryFiles();
   


  @Test
  public void testDestroyInstanceById() throws Exception {
    ClusterControllerFactory factory = mock(ClusterControllerFactory.class);
    ClusterController controller = mock(ClusterController.class);
    when(factory.create((String) any())).thenReturn(controller);

    DestroyInstanceCommand command = new DestroyInstanceCommand(factory);
    Map<String, File> keys = KeyPair.generateTemporaryFiles();
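The DestroyInstanceCommand test above exercises the command layer through mocks; the call it ultimately delegates to is a single controller method. A minimal sketch, assuming a clusterSpec for an already-running cluster and a known instance id (the id below is a placeholder):

    ClusterController controller = new ClusterController();
    // Terminate just one node of the cluster, identified by its provider id,
    // leaving the rest of the cluster running.
    controller.destroyInstance(clusterSpec, "us-east-1/i-0123456789");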
