Examples of KafkaConsumer


Examples of co.cask.cdap.logging.kafka.KafkaConsumer

        // Fetches log events forward from Kafka for this logging context:
        // starts just after fromOffset and stops at the partition's latest offset.
        // NOTE(review): fragment is truncated by the listing — the closing braces
        // of the finally block and of run() are cut off.
        public void run() {
          // Map the logging context to its Kafka partition.
          int partition = partitioner.partition(loggingContext.getLogPartition(), numPartitions);

          callback.init();

          KafkaConsumer kafkaConsumer = new KafkaConsumer(seedBrokers, topic, partition, kafkaTailFetchTimeoutMs);
          try {
            // Combine the context-derived filter with the caller-supplied filter.
            Filter logFilter = new AndFilter(ImmutableList.of(LoggingContextHelper.createFilter(loggingContext),
                                                              filter));

            long latestOffset = kafkaConsumer.fetchOffset(KafkaConsumer.Offset.LATEST);
            long startOffset = fromOffset + 1;  // resume immediately after the last offset already seen

            if (startOffset >= latestOffset) {
              // At end of events, nothing to return
              return;
            }

            fetchLogEvents(kafkaConsumer, logFilter, startOffset, latestOffset, maxEvents, callback);
          } catch (Throwable e) {
            LOG.error("Got exception: ", e);
            throw  Throwables.propagate(e);
          } finally {
            // Close the callback first; the nested finally guarantees the consumer
            // is closed even if callback.close() throws.
            try {
              try {
                callback.close();
              } finally {
                kafkaConsumer.close();
              }
            } catch (IOException e) {
              LOG.error(String.format("Caught exception when closing KafkaConsumer for topic %s, partition %d",
                                      topic, partition), e);
            }
View Full Code Here

Examples of co.cask.cdap.logging.kafka.KafkaConsumer

        // Fetches log events backward from Kafka (previous-page semantics): reads
        // the window [startOffset, stopOffset) ending at fromOffset — or at the
        // latest offset when fromOffset < 0 — and keeps stepping the window back
        // toward the earliest offset until at least one matching event is emitted.
        // NOTE(review): fragment is truncated by the listing — the closing braces
        // of the finally block and of run() are cut off.
        public void run() {
          // Map the logging context to its Kafka partition.
          int partition = partitioner.partition(loggingContext.getLogPartition(), numPartitions);

          callback.init();

          KafkaConsumer kafkaConsumer = new KafkaConsumer(seedBrokers, topic, partition, kafkaTailFetchTimeoutMs);
          try {
            // Combine the context-derived filter with the caller-supplied filter.
            Filter logFilter = new AndFilter(ImmutableList.of(LoggingContextHelper.createFilter(loggingContext),
                                                              filter));

            long latestOffset = kafkaConsumer.fetchOffset(KafkaConsumer.Offset.LATEST);
            long earliestOffset = kafkaConsumer.fetchOffset(KafkaConsumer.Offset.EARLIEST);
            long stopOffset;
            long startOffset;

            // A negative fromOffset means "no position yet": page back from the end.
            if (fromOffset < 0)  {
              stopOffset = latestOffset;
            } else {
              stopOffset = fromOffset;
            }
            startOffset = stopOffset - maxEvents;

            // Clamp the window start so we never read before the earliest retained offset.
            if (startOffset < earliestOffset) {
              startOffset = earliestOffset;
            }

            if (startOffset >= stopOffset || startOffset >= latestOffset) {
              // At end of kafka events, nothing to return
              return;
            }

            // Events between startOffset and stopOffset may not have the required logs we are looking for,
            // we'll need to return at least 1 log offset for next getLogPrev call to work.
            int fetchCount = 0;
            while (fetchCount == 0) {
              fetchCount = fetchLogEvents(kafkaConsumer, logFilter, startOffset, stopOffset, maxEvents, callback);
              // Slide the window one page earlier for the next attempt.
              stopOffset = startOffset;
              if (stopOffset <= earliestOffset) {
                // Truly no log messages found.
                break;
              }

              startOffset = stopOffset - maxEvents;
              if (startOffset < earliestOffset) {
                startOffset = earliestOffset;
              }
            }
          } catch (Throwable e) {
            LOG.error("Got exception: ", e);
            throw  Throwables.propagate(e);
          } finally {
            // Close the callback first; the nested finally guarantees the consumer
            // is closed even if callback.close() throws.
            try {
              try {
                callback.close();
              } finally {
                kafkaConsumer.close();
              }
            } catch (IOException e) {
              LOG.error(String.format("Caught exception when closing KafkaConsumer for topic %s, partition %d",
                                      topic, partition), e);
            }
View Full Code Here

Examples of com.netflix.suro.input.kafka.KafkaConsumer

        // NOTE(review): this fragment starts mid-method — the enclosing test
        // method's signature and the setup of `properties`, `zk`, `sink`,
        // `kafkaSink`, `rateLimiter`, `run` etc. are not visible here.
        // It exercises a Suro KafkaConsumer feeding a QueuedSink and checks that
        // consumption pauses (pending-message count plateaus) once the sink's
        // queue exceeds the pause threshold.
        properties.setProperty("group.id", "testkafkaconsumer");
        properties.setProperty("zookeeper.connect", zk.getConnectionString());
        properties.setProperty("auto.offset.reset", "smallest");

        properties.setProperty("consumer.timeout.ms", "1000");
        KafkaConsumer consumer = new KafkaConsumer(properties, TOPIC_NAME, router, jsonMapper);

        consumer.start();

        // set the pause threshold to 100
        QueuedSink.MAX_PENDING_MESSAGES_TO_PAUSE = 100;

        Thread t = createProducerThread(jsonMapper, kafkaSink, TOPIC_NAME);

        // Wait until the sink's pending-message count has been observed at or
        // above the pause threshold three times (polled once per second).
        int count = 0;
        while (count < 3) {
            if (sink.getNumOfPendingMessages() >= QueuedSink.MAX_PENDING_MESSAGES_TO_PAUSE) {
                ++count;
            }
            Thread.sleep(1000);
        }

        // Sample the number of pending messages once per second for 10 seconds.
        ArrayList<Integer> countList = new ArrayList<Integer>();
        for (int i = 0; i < 10; ++i) {
            countList.add((int) sink.getNumOfPendingMessages());
            Thread.sleep(1000);
        }

        // The tail samples should be roughly flat (within a delta of 5),
        // i.e. the consumer has paused instead of growing the backlog.
        for (int i = 6; i < 9; ++i) {
            assertEquals(countList.get(i), countList.get(i + 1), 5);
        }

        // Remove the throttle so the sink can drain.
        rateLimiter.setRate(Double.MAX_VALUE);

        // Stop the producer thread and wait for it to finish.
        run.set(false);
        t.join();

        consumer.shutdown();

        sink.close();

        // After shutdown and close, the backlog must be fully drained.
        assertEquals(sink.getNumOfPendingMessages(), 0);
    }
View Full Code Here
TOP
Copyright © 2018 www.massapi.com. All rights reserved.
All source code is the property of its respective owners. Java is a trademark of Sun Microsystems, Inc and owned by ORACLE Inc. Contact coftware#gmail.com.