Examples of Topology


Examples of base.drawable.Topology

            // Fragment (incomplete — enclosing method not visible): appears to be
            // building a multi-line textual dump of a "shade" drawable into strbuf.
            strbuf.append( "\n\n" );
            // Presumably renders the averaged vertex coordinates (" (ave)" suffix)
            // into the coordinate text — TODO confirm against the full method.
            this.setCoordsText( shade.getVertices(), " (ave)" );
            strbuf.append( "\n" );
   
            // Locals for the per-category weight printout that follows; the code
            // that uses them is cut off at the end of this snippet.
            StringBuffer      linebuf;
            Topology          shade_topo;
            CategoryWeight[]  twgts;
            CategoryWeight    twgt;
            String            twgt_str;
            int               print_status;
            int               idx;
View Full Code Here

Examples of com.alu.e3.topology.model.Topology

    // Fragment (incomplete — enclosing method and the try's catch/finally are not
    // visible): loads a topology from a file and begins an install over its targets.
    // Accumulates non-fatal issues to report after the install attempt.
    StringBuffer warnings = new StringBuffer();


    try {
     
      // Parse the topology definition from the source file on disk.
      Topology tmpTopology = topology.getTopologyFromFile(srcTopologyFile.getAbsolutePath());
     
      if(logger.isDebugEnabled()) {
        logger.debug("Installing E3 on all topology targets ...");
      }
     
View Full Code Here

Examples of org.apache.cassandra.locator.TokenMetadata.Topology

        // Fragment (incomplete — enclosing method not visible): sets up per-DC
        // bookkeeping for rack-aware replica placement.
        // Per-DC set of chosen replicas; each set is pre-sized with the DC's
        // replication factor (dc.getValue()).
        // NOTE(review): double-brace initialization builds an anonymous HashMap
        // subclass that pins the enclosing instance — works, but is a known
        // anti-pattern; cannot be restructured here because the snippet is truncated.
        Map<String, Set<InetAddress>> dcReplicas = new HashMap<String, Set<InetAddress>>(datacenters.size())
        {{
            for (Map.Entry<String, Integer> dc : datacenters.entrySet())
                put(dc.getKey(), new HashSet<InetAddress>(dc.getValue()));
        }};
        // Snapshot of the ring layout: DC -> endpoints and DC -> rack -> endpoints.
        Topology topology = tokenMetadata.getTopology();
        // all endpoints in each DC, so we can check when we have exhausted all the members of a DC
        Multimap<String, InetAddress> allEndpoints = topology.getDatacenterEndpoints();
        // all racks in a DC so we can check when we have exhausted all racks in a DC
        Map<String, Multimap<String, InetAddress>> racks = topology.getDatacenterRacks();
        assert !allEndpoints.isEmpty() && !racks.isEmpty() : "not aware of any cluster members";

        // tracks the racks we have already placed replicas in
        Map<String, Set<String>> seenRacks = new HashMap<String, Set<String>>(datacenters.size())
        {{
View Full Code Here

Examples of org.apache.cassandra.locator.TokenMetadata.Topology

        // Fragment (incomplete — duplicate of the previous Cassandra snippet):
        // sets up per-DC bookkeeping for rack-aware replica placement.
        // Per-DC set of chosen replicas; each set is pre-sized with the DC's
        // replication factor (dc.getValue()).
        // NOTE(review): double-brace initialization builds an anonymous HashMap
        // subclass that pins the enclosing instance — a known anti-pattern; left
        // as-is because the snippet is truncated.
        Map<String, Set<InetAddress>> dcReplicas = new HashMap<String, Set<InetAddress>>(datacenters.size())
        {{
            for (Map.Entry<String, Integer> dc : datacenters.entrySet())
                put(dc.getKey(), new HashSet<InetAddress>(dc.getValue()));
        }};
        // Snapshot of the ring layout: DC -> endpoints and DC -> rack -> endpoints.
        Topology topology = tokenMetadata.getTopology();
        // all endpoints in each DC, so we can check when we have exhausted all the members of a DC
        Multimap<String, InetAddress> allEndpoints = topology.getDatacenterEndpoints();
        // all racks in a DC so we can check when we have exhausted all racks in a DC
        Map<String, Multimap<String, InetAddress>> racks = topology.getDatacenterRacks();
        assert !allEndpoints.isEmpty() && !racks.isEmpty() : "not aware of any cluster members";

        // tracks the racks we have already placed replicas in
        Map<String, Set<String>> seenRacks = new HashMap<String, Set<String>>(datacenters.size())
        {{
View Full Code Here

Examples of org.apache.geronimo.datastore.impl.remote.messaging.Topology

        // Fragment (incomplete — method signature not visible): wires two server
        // nodes together and publishes a shared Topology to both kernels.
        Node node = (Node) node2GB.getTarget();
        // The second ServerNode joins the first one.
        node.join(primaryNode);
       
        // Sets the topology.
        // Builds a single weighted path primary -> secondary (same weight both ways).
        Topology topology = new Topology();
        PathWeight weight = new PathWeight(10);
        NodePath path = new NodePath(primaryNode, secondaryNode, weight, weight);
        topology.addPath(path);

        // Both kernels receive the same topology object via their "Topology" attribute.
        kernel1.setAttribute(node1Name, "Topology", topology);
        kernel2.setAttribute(node2Name, "Topology", topology);
    }
View Full Code Here

Examples of org.apache.hadoop.gateway.topology.Topology

  /**
   * Applies the topology's providers and services to the deployment context by
   * delegating to their matching deployment contributors.
   *
   * NOTE(review): this snippet is truncated — the tail of the service catch
   * block and the method's closing braces are not visible here.
   *
   * @param context   deployment being assembled; supplies the Topology.
   * @param providers registered provider contributors, keyed by role.
   * @param services  registered service contributors, keyed by role.
   */
  private static void contribute(
      DeploymentContext context,
      Map<String,List<ProviderDeploymentContributor>> providers,
      Map<String,List<ServiceDeploymentContributor>> services ) {
      Topology topology = context.getTopology();
    // Providers: only contribute enabled providers with a known contributor.
    // A provider failure is fatal for the whole deployment (rethrown below).
    for( Provider provider : topology.getProviders() ) {
      ProviderDeploymentContributor contributor = getProviderContributor( providers, provider.getRole(), provider.getName() );
      if( contributor != null && provider.isEnabled() ) {
        try {
          contributor.contributeProvider( context, provider );
        } catch( Exception e ) {
          // Maybe it makes sense to throw exception
          log.failedToContributeProvider( provider.getName(), provider.getRole(), e );
          throw new DeploymentException("Failed to contribute provider.", e);
        }
      }
    }
    // Services: contribute each service and, when a service registry is
    // available, register the service URL under the topology's registration code.
    for( Service service : topology.getServices() ) {
      ServiceDeploymentContributor contributor = getServiceContributor( service.getRole(), null );
      if( contributor != null ) {
        try {
          contributor.contributeService( context, service );
          if (gatewayServices != null) {
            ServiceRegistry sr = (ServiceRegistry) gatewayServices.getService(GatewayServices.SERVICE_REGISTRY_SERVICE);
            if (sr != null) {
              String regCode = sr.getRegistrationCode(topology.getName());
              sr.registerService(regCode, topology.getName(), service.getRole(), service.getUrl() );
            }
          }
        } catch( Exception e ) {
          // Maybe it makes sense to throw exception
          log.failedToContributeService( service.getName(), service.getRole(), e );

Examples of org.apache.hadoop.gateway.topology.Topology

      // Fragment (incomplete — test method signature and setup not visible):
      // exercises a file-based topology provider through create/update/delete
      // of topology XML files, driving the monitor manually via kickMonitor.

      // Initial state: exactly one topology ("one") discovered.
      kickMonitor( monitor );

      Collection<Topology> topologies = provider.getTopologies();
      assertThat( topologies, notNullValue() );
      assertThat( topologies.size(), is( 1 ) );
      Topology topology = topologies.iterator().next();
      assertThat( topology.getName(), is( "one" ) );
      assertThat( topology.getTimestamp(), is( time ) );
      assertThat( topoListener.events.size(), is( 1 ) );
      topoListener.events.clear();

      // Add a file to the directory.
      File two = createFile( dir, "two.xml", "org/apache/hadoop/gateway/topology/file/topology-two.xml", 1L );
      kickMonitor( monitor );
      topologies = provider.getTopologies();
      assertThat( topologies.size(), is( 2 ) );
      // Iteration order is not guaranteed, so verify the name set as a whole.
      Set<String> names = new HashSet<String>( Arrays.asList( "one", "two" ) );
      Iterator<Topology> iterator = topologies.iterator();
      topology = iterator.next();
      assertThat( names, hasItem( topology.getName() ) );
      names.remove( topology.getName() );
      topology = iterator.next();
      assertThat( names, hasItem( topology.getName() ) );
      names.remove( topology.getName() );
      assertThat( names.size(), is( 0 ) );
      assertThat( topoListener.events.size(), is( 1 ) );
      List<TopologyEvent> events = topoListener.events.get( 0 );
      assertThat( events.size(), is( 1 ) );
      TopologyEvent event = events.get( 0 );
      assertThat( event.getType(), is( TopologyEvent.Type.CREATED ) );
      assertThat( event.getTopology(), notNullValue() );

      // Update a file in the directory.
      // Same file name, new content and timestamp — still two topologies.
      two = createFile( dir, "two.xml", "org/apache/hadoop/gateway/topology/file/topology-three.xml", 2L );
      kickMonitor( monitor );
      topologies = provider.getTopologies();
      assertThat( topologies.size(), is( 2 ) );
      names = new HashSet<String>( Arrays.asList( "one", "two" ) );
      iterator = topologies.iterator();
      topology = iterator.next();
      assertThat( names, hasItem( topology.getName() ) );
      names.remove( topology.getName() );
      topology = iterator.next();
      assertThat( names, hasItem( topology.getName() ) );
      names.remove( topology.getName() );
      assertThat( names.size(), is( 0 ) );

      // Remove a file from the directory.
      // Only the untouched "one" topology should remain.
      two.delete();
      kickMonitor( monitor );
      topologies = provider.getTopologies();
      assertThat( topologies.size(), is( 1 ) );
      topology = topologies.iterator().next();
      assertThat( topology.getName(), is( "one" ) );
      assertThat( topology.getTimestamp(), is( time ) );
    } finally {
      // Best-effort cleanup of the temporary topology directory.
      FileUtils.deleteQuietly( dir );
    }
  }
View Full Code Here

Examples of org.apache.hadoop.gateway.topology.Topology

      // Fragment (incomplete — test method signature and the trailing assertions
      // are not visible): builds a minimal topology and deploys it to a WAR.
      srvcs.init(config, options);
    } catch (ServiceLifecycleException e) {
      e.printStackTrace(); // I18N not required.
    }

    // Topology under test: one WEBHDFS service plus one generic
    // authentication provider carrying two parameters.
    Topology topology = new Topology();
    topology.setName( "test-cluster" );
    Service service = new Service();
    service.setRole( "WEBHDFS" );
    service.setUrl( "http://localhost:50070/test-service-url" );
    topology.addService( service );

    Provider provider = new Provider();
    provider.setRole( "authentication" );
    provider.setName( "generic" );
    provider.setEnabled( true );
    Param param = new Param();
    param.setName( "filter" );
    param.setValue( "org.opensource.ExistingFilter" );
    provider.addParam( param );
    param = new Param();
    param.setName( "test-param-name" );
    param.setValue( "test-param-value" );
    provider.addParam( param );
    topology.addProvider( provider );

    // Deploy and parse the generated gateway descriptor for verification
    // (assertions are cut off at the end of this snippet).
    WebArchive war = DeploymentFactory.createDeployment( config, topology );

    Document gateway = parse( war.get( "WEB-INF/gateway.xml" ).getAsset().openStream() );
    //dump( gateway );
View Full Code Here

Examples of org.apache.hadoop.gateway.topology.Topology

      // Fragment (incomplete — test method signature and the catch for the
      // expected exception are not visible): negative test verifying that
      // deployment fails when the provider's required "filter" param is absent.
      srvcs.init(config, options);
    } catch (ServiceLifecycleException e) {
      e.printStackTrace(); // I18N not required.
    }

    Topology topology = new Topology();
    topology.setName( "test-cluster" );
    Service service = new Service();
    service.setRole( "WEBHDFS" );
    service.setUrl( "http://localhost:50070/test-service-url" );
    topology.addService( service );

    Provider provider = new Provider();
    provider.setRole( "authentication" );
    provider.setName( "generic" );
    provider.setEnabled( true );
    Param param; // = new ProviderParam();
    // Missing filter param.
    //param.setName( "filter" );
    //param.setValue( "org.opensource.ExistingFilter" );
    //provider.addParam( param );
    param = new Param();
    param.setName( "test-param-name" );
    param.setValue( "test-param-value" );
    provider.addParam( param );
    topology.addProvider( provider );

    // Silence expected error logging while provoking the failure.
    Enumeration<Appender> appenders = NoOpAppender.setUp();
    try {
      DeploymentFactory.createDeployment( config, topology );
      fail( "Should have throws IllegalArgumentException" );
View Full Code Here

Examples of org.apache.hadoop.gateway.topology.Topology

      // Fragment (incomplete — test method signature and the trailing
      // verification are not visible): deploys a topology with authentication,
      // identity-assertion, and authorization providers.
      srvcs.init(config, options);
    } catch (ServiceLifecycleException e) {
      e.printStackTrace(); // I18N not required.
    }

    // WEBHDFS service plus three provider roles typical of a secured cluster.
    Topology topology = new Topology();
    topology.setName( "test-cluster" );
    Service service = new Service();
    service.setRole( "WEBHDFS" );
    service.setUrl( "http://localhost:50070/webhdfs" );
    topology.addService( service );
    // Authentication provider configured via a Spring security context file.
    Provider provider = new Provider();
    provider.setRole( "authentication" );
    provider.setEnabled( true );
    Param param = new Param();
    param.setName( "contextConfigLocation" );
    param.setValue( "classpath:app-context-security.xml" );
    provider.addParam( param );
    topology.addProvider( provider );
    Provider asserter = new Provider();
    asserter.setRole( "identity-assertion" );
    asserter.setName("Pseudo");
    asserter.setEnabled( true );
    topology.addProvider( asserter );
    Provider authorizer = new Provider();
    authorizer.setRole( "authorization" );
    authorizer.setName("AclsAuthz");
    authorizer.setEnabled( true );
    topology.addProvider( authorizer );

    // Build the WAR; exploded-export lines kept for local debugging.
    WebArchive war = DeploymentFactory.createDeployment( config, topology );
    //File dir = new File( System.getProperty( "user.dir" ) );
    //File file = war.as( ExplodedExporter.class ).exportExploded( dir, "test-cluster.war" );
View Full Code Here
TOP
Copyright © 2018 www.massapi.com. All rights reserved.
All source code is the property of the respective owners. Java is a trademark of Sun Microsystems, Inc., now owned by Oracle Inc. Contact coftware#gmail.com.