Package com.rackspacecloud.blueflood.concurrent

Examples of com.rackspacecloud.blueflood.concurrent.ThreadPoolBuilder
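ThreadPoolBuilder is a fluent builder for creating named, fixed-size java.util.concurrent.ThreadPoolExecutor instances. The sketch below shows the typical call pattern inferred from the excerpts that follow; the pool name, sizes, and submitted task are illustrative placeholders rather than values taken from Blueflood configuration.

import java.util.concurrent.ThreadPoolExecutor;

import com.rackspacecloud.blueflood.concurrent.ThreadPoolBuilder;

public class ThreadPoolBuilderSketch {
    public static void main(String[] args) {
        // A fixed-size pool with an unbounded work queue, the most common shape in
        // the excerpts below. withBoundedQueue(size) and withRejectedHandler(...)
        // appear in other examples further down.
        ThreadPoolExecutor executor = new ThreadPoolBuilder()
                .withName("Example ThreadPool")   // human-readable name for the pool
                .withCorePoolSize(4)
                .withMaxPoolSize(4)
                .withUnboundedQueue()
                .build();

        executor.submit(new Runnable() {
            @Override
            public void run() {
                System.out.println("running on " + Thread.currentThread().getName());
            }
        });

        executor.shutdown();
    }
}

The excerpts below show the builder in context: rollup event emission, the metadata cache's batched read and write pools, the HTTP and UDP ingestion pipelines, Kafka producers, and unit tests.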


    public static final String ROLLUP_EVENT_NAME = "rollup".intern();
    private static ThreadPoolExecutor eventExecutors;
    private static final RollupEventEmitter instance = new RollupEventEmitter();

    private RollupEventEmitter() {
        eventExecutors = new ThreadPoolBuilder()
                .withName("RollupEventEmitter ThreadPool")
                .withCorePoolSize(numberOfWorkers)
                .withMaxPoolSize(numberOfWorkers)
                .withUnboundedQueue()
                .build();


        } catch (Exception e) {
            // pass
        }
        this.outstandingMetaReads = new ConcurrentSkipListSet<Locator>();
        this.metaReads = new ConcurrentLinkedQueue<Locator>();
        this.readThreadPoolExecutor = new ThreadPoolBuilder().withCorePoolSize(batchedReadsPipelineLimit)
                .withMaxPoolSize(batchedReadsPipelineLimit)
                .withUnboundedQueue().withName("MetaBatchedReadsThreadPool").build();

        this.batchedReads = Configuration.getInstance().getBooleanProperty(
                CoreConfig.META_CACHE_BATCHED_READS);
        this.batchedWrites = Configuration.getInstance().getBooleanProperty(
                CoreConfig.META_CACHE_BATCHED_WRITES);
        if (batchedReads) {
            this.batchedReadsTimer.schedule(new TimerTask() {
                @Override
                public void run() {
                    fetchMeta(true);
                }
            }, 0, this.batchedReadsInterval.toMillis());
        }
        this.outstandingMetaWrites = new ConcurrentSkipListSet<CacheKey>();
        this.writeThreadPoolExecutor = new ThreadPoolBuilder().withCorePoolSize(batchedWritesPipelineLimit)
                .withMaxPoolSize(batchedWritesPipelineLimit)
                .withUnboundedQueue().withName("MetaBatchedWritesThreadPool").build();
        this.metaWrites = new ConcurrentLinkedQueue<CacheKey>();

        if (batchedWrites) {

        final AsyncFunction<MetricsCollection, List<List<IMetric>>> batchSplitter;
        final AsyncFunction<List<List<IMetric>>, List<Boolean>> batchWriter;
        final AsyncFunction<MetricsCollection, MetricsCollection> rollupTypeCacher;
       
        typeAndUnitProcessor = new TypeAndUnitProcessor(
                new ThreadPoolBuilder()
                        .withName("Metric type and unit processing")
                        .withCorePoolSize(HTTP_MAX_TYPE_UNIT_PROCESSOR_THREADS)
                        .withMaxPoolSize(HTTP_MAX_TYPE_UNIT_PROCESSOR_THREADS)
                        .build(),
                metricMetadataAnalyzer
        ).withLogger(log);
       
        batchSplitter = new BatchSplitter(
                new ThreadPoolBuilder().withName("Metric batching").build(),
                BATCH_SIZE
        ).withLogger(log);

        batchWriter = new BatchWriter(
                new ThreadPoolBuilder()
                        .withName("Metric Batch Writing")
                        .withCorePoolSize(WRITE_THREADS)
                        .withMaxPoolSize(WRITE_THREADS)
                        .withUnboundedQueue()
                        .build(),
                writer,
                timeout,
                bufferedMetrics,
                context
        ).withLogger(log);

        discoveryWriter = new DiscoveryWriter(new ThreadPoolBuilder()
            .withName("Metric Discovery Writing")
            .withCorePoolSize(Configuration.getInstance().getIntegerProperty(CoreConfig.DISCOVERY_WRITER_MIN_THREADS))
            .withMaxPoolSize(Configuration.getInstance().getIntegerProperty(CoreConfig.DISCOVERY_WRITER_MAX_THREADS))
            .withUnboundedQueue()
            .build());


        // RollupRunnable keeps a static one of these. It would be nice if we could register it and share.
        MetadataCache rollupTypeCache = MetadataCache.createLoadingCacheInstance(
                new TimeValue(48, TimeUnit.HOURS),
                Configuration.getInstance().getIntegerProperty(CoreConfig.MAX_ROLLUP_READ_THREADS));
        rollupTypeCacher = new RollupTypeCacher(
                new ThreadPoolBuilder().withName("Rollup type persistence").build(),
                rollupTypeCache,
                true
        ).withLogger(log);

        this.defaultProcessorChain = AsyncChain

    private void init() throws Exception {
        try {
            KafkaConfig config = new KafkaConfig();
            if (config.getBooleanProperty("blueflood.enable.kafka.service")) {
                numberOfProducers = config.getIntegerProperty("blueflood.producer.count") != null
                        ? config.getIntegerProperty("blueflood.producer.count")
                        : DEFAULT_KAFKA_PRODUCERS;
                kafkaExecutors = new ThreadPoolBuilder()
                        .withCorePoolSize(numberOfProducers)
                        .withMaxPoolSize(numberOfProducers)
                        .withUnboundedQueue()
                        .build();
                for (int i = 0; i < numberOfProducers; i++) {

        AsyncChain<DatagramPacket, ?> processor =

                // this stage deserializes the UDP datagrams. since the serialization is at our discretion (and
                // yours too), it just matters that you are able to end up with a collection of
                // com.rackspacecloud.blueflood.types.Metric.
                AsyncChain.withFunction(new DeserializeAndReleaseFunc(new ThreadPoolBuilder().withName("Packet Deserializer").build()))

                // this stage writes single metrics to the database.
                .withFunction(new SimpleMetricWriter(new ThreadPoolBuilder().withName("Database Writer").build()))

                // this stage updates the context, which eventually gets pushed to the database.
                .withFunction(new ContextUpdater(new ThreadPoolBuilder().withName("Context Updater").build(), context))
                .build();

        return processor;
    }

        );
        this.maxMetricsPerRequest = config.getIntegerProperty(HttpConfig.MAX_METRICS_PER_BATCH_QUERY);
        this.serializer = new BatchedMetricsJSONOutputSerializer();
        this.gson = new GsonBuilder().setPrettyPrinting().serializeNulls().create();
        this.parser = new JsonParser();
        this.executor = new ThreadPoolBuilder().withCorePoolSize(maxThreadsToUse).withMaxPoolSize(maxThreadsToUse)
                .withName("HTTP-BatchMetricsFetch").withBoundedQueue(maxQueueSize).build();
    }

                                                    range,
                                                    CassandraModel.CF_METRICS_5M).getPoints().size());
       
        RollupExecutionContext rec = new RollupExecutionContext(Thread.currentThread());
        SingleRollupReadContext rc = new SingleRollupReadContext(normalLocator, range, Granularity.MIN_5);
        RollupBatchWriter batchWriter = new RollupBatchWriter(new ThreadPoolBuilder().build(), rec);
        RollupRunnable rr = new RollupRunnable(rec, rc, batchWriter);
        rr.run();

        while (!rec.doneReading() && !rec.doneWriting()) {
            batchWriter.drainBatch();

                                                    range,
                                                    CassandraModel.CF_METRICS_PREAGGREGATED_5M).getPoints().size());
       
        RollupExecutionContext rec = new RollupExecutionContext(Thread.currentThread());
        SingleRollupReadContext rc = new SingleRollupReadContext(locator, range, Granularity.MIN_5);
        RollupBatchWriter batchWriter = new RollupBatchWriter(new ThreadPoolBuilder().build(), rec);
        RollupRunnable rr = new RollupRunnable(rec, rc, batchWriter);
        rr.run();
       
        // assert something in 5m for this locator.
        while (!rec.doneReading() && !rec.doneWriting()) {

public class DiscoveryWriterTest {
    @Test
    public void testProcessor() throws Exception {
        DiscoveryWriter discWriter =
                new DiscoveryWriter(new ThreadPoolBuilder()
                        .withName("Metric Discovery Writing")
                        .withCorePoolSize(10)
                        .withMaxPoolSize(10)
                        .withUnboundedQueue()
                        .withRejectedHandler(new ThreadPoolExecutor.AbortPolicy())

        EventListener elistener = new EventListener();
        //Test subscription
        emitter.on(testEventName, elistener);
        Assert.assertTrue(emitter.listeners(testEventName).contains(elistener));
        //Test concurrent emission
        ThreadPoolExecutor executors = new ThreadPoolBuilder()
                .withCorePoolSize(2)
                .withMaxPoolSize(3)
                .build();
        final RollupEvent obj1 = new RollupEvent(null, null, "payload1", "gran", 0);
        final RollupEvent obj2 = new RollupEvent(null, null, "payload2", "gran", 0);
