Package com.cloudera.flume.core

Examples of com.cloudera.flume.core.EventSink
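Before the excerpts, a minimal sketch of the EventSink lifecycle (build, open, append, close). It mirrors the FlumeBuilder, Context, and EventImpl calls used in the examples below; the import paths and the standalone main wrapper are assumptions based on the Flume OG package layout, not taken from any single excerpt.

// Minimal lifecycle sketch: build a "console" sink, push one event through
// it, and close it. Import paths are assumed (Flume OG layout); the
// buildSink/open/append/close calls mirror the excerpts below.
import com.cloudera.flume.conf.Context;
import com.cloudera.flume.conf.FlumeBuilder;
import com.cloudera.flume.core.Event;
import com.cloudera.flume.core.EventImpl;
import com.cloudera.flume.core.EventSink;

public class EventSinkLifecycleSketch {
  public static void main(String[] args) throws Exception {
    // "console" builds a sink that writes each event to stdout.
    EventSink snk = FlumeBuilder.buildSink(new Context(), "console");

    snk.open();                                     // acquire resources
    Event e = new EventImpl("hello flume".getBytes());
    snk.append(e);                                  // deliver one event
    snk.close();                                    // flush and release resources
  }
}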


    FlumeNode node = FlumeNode.getInstance();
    File tmpdir = FileUtil.mktempdir();

    EventSource ackedmem = setupAckRoll();
    Pair<RollSink, EventSink> p = setupSink(node, tmpdir);
    EventSink snk = p.getRight();
    RollSink roll = p.getLeft();
    snk.open();

    String tag1 = roll.getCurrentTag();
    LOG.info(tag1);
    snk.append(ackedmem.next()); // ack beg
    snk.append(ackedmem.next()); // data
    snk.append(ackedmem.next()); // ack end
    snk.append(ackedmem.next()); // ack beg
    Clock.sleep(10); // have to make sure it is not in the same millisecond
    roll.rotate(); // we should have the first batch and part of the second
    // one ack pending
    assertEquals(1, node.getAckChecker().getPendingAckTags().size());
    node.getAckChecker().checkAcks();
    // no acks pending
    assertEquals(0, node.getAckChecker().getPendingAckTags().size());

    // we are partially through the second batch, at a different split point

    String tag2 = roll.getCurrentTag();
    LOG.info(tag2);
    snk.append(ackedmem.next()); // data
    snk.append(ackedmem.next()); // ack end
    snk.append(ackedmem.next()); // ack beg
    snk.append(ackedmem.next()); // data
    snk.append(ackedmem.next()); // ack end
    Clock.sleep(10); // have to make sure it is not in the same millisecond
    roll.rotate();
    // now we have closed off group2 and group3
    assertEquals(2, node.getAckChecker().getPendingAckTags().size());
    node.getAckChecker().checkAcks();
    Clock.sleep(10); // have to make sure it is not in the same millisecond

    // no more acks left
    LOG.info("pending ack tags: " + node.getAckChecker().getPendingAckTags());
    assertEquals(0, node.getAckChecker().getPendingAckTags().size());

    snk.close();

    FileUtil.rmr(tmpdir);
    BenchmarkHarness.cleanupLocalWriteDir();
  }


  /**
   * Close can happen before open has completed.
   */
  @Test
  public void testHdfsDownInterruptBeforeOpen() throws FlumeSpecException,
      IOException, InterruptedException {
    final EventSink snk = FlumeBuilder.buildSink(new Context(),
        "collectorSink(\"hdfs://nonexistant/user/foo\", \"foo\")");

    final CountDownLatch done = new CountDownLatch(1);

    Thread t = new Thread("append thread") {
      public void run() {
        Event e = new EventImpl("foo".getBytes());
        try {
          snk.open();

          snk.append(e);
        } catch (IOException e1) {
          // could be exception but we don't care
          LOG.info("don't care about this exception: ", e1);
        } catch (InterruptedException e1) {
          // interruption is expected here; the test interrupts this thread
          LOG.info("append thread interrupted: ", e1);
        }
        done.countDown();
      }
    };
    t.start();
    snk.close();
    t.interrupt();
    boolean completed = done.await(60, TimeUnit.SECONDS);
    assertTrue("Timed out when attempting to shutdown", completed);
  }

  /**
   * Close always happens after open has completed.
   */
  @Test
  public void testHdfsDownInterruptAfterOpen() throws FlumeSpecException,
      IOException, InterruptedException {
    final EventSink snk = FlumeBuilder.buildSink(new Context(),
        "collectorSink(\"hdfs://nonexistant/user/foo\", \"foo\")");

    final CountDownLatch started = new CountDownLatch(1);
    final CountDownLatch done = new CountDownLatch(1);

    Thread t = new Thread("append thread") {
      public void run() {
        Event e = new EventImpl("foo".getBytes());
        try {
          snk.open();
          started.countDown();
          snk.append(e);
        } catch (IOException e1) {
          // could be an exception but we don't care.
          LOG.info("don't care about this exception: ", e1);
        } catch (InterruptedException e1) {
          // interruption is expected here; the test interrupts this thread
          LOG.info("append thread interrupted: ", e1);
        }
        done.countDown();
      }
    };
    t.start();
    boolean begun = started.await(60, TimeUnit.SECONDS);
    assertTrue("took too long to start", begun);
    snk.close();
    LOG.info("Interrupting appending thread");
    t.interrupt();
    boolean completed = done.await(60, TimeUnit.SECONDS);
    assertTrue("Timed out when attempting to shutdown", completed);
  }

  /**
   * Close always happens after open started retrying.
   */
  @Test
  public void testHdfsDownInterruptAfterOpeningRetry()
      throws FlumeSpecException, IOException, InterruptedException {
    final EventSink snk = new LazyOpenDecorator(FlumeBuilder.buildSink(
        new Context(),
        "collectorSink(\"hdfs://nonexistant/user/foo\", \"foo\")"));

    final CountDownLatch started = new CountDownLatch(1);
    final CountDownLatch done = new CountDownLatch(1);

    Thread t = new Thread("append thread") {
      public void run() {
        Event e = new EventImpl("foo".getBytes());
        try {
          snk.open();
          started.countDown();
          snk.append(e);
        } catch (IOException e1) {
          // could throw exception but we don't care
          LOG.info("don't care about this exception: ", e1);
        } catch (InterruptedException e1) {
          // interruption is expected here; the test interrupts this thread
          LOG.info("append thread interrupted: ", e1);
        }
        done.countDown();
      }
    };
    t.start();
    boolean begun = started.await(60, TimeUnit.SECONDS);
    Clock.sleep(10);
    assertTrue("took too long to start", begun);
    snk.close();
    LOG.info("Interrupting appending thread");
    t.interrupt();
    boolean completed = done.await(60, TimeUnit.SECONDS);
    assertTrue("Timed out when attempting to shutdown", completed);
  }

  @Test
  public void testMultipleSinks() throws FlumeSpecException, IOException,
      InterruptedException {
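    // collector(5000) wraps a fan-out ([ ... ]) of two counter sinks, "foo" and
    // "bar"; the single append below should increment both counters by one.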
    String spec = "collector(5000) { [ counter(\"foo\"), counter(\"bar\") ] }";
    EventSink snk = FlumeBuilder.buildSink(new ReportTestingContext(
        LogicalNodeContext.testingContext()), spec);
    snk.open();
    snk.append(new EventImpl("this is a test".getBytes()));
    snk.close();
    ReportEvent rpta = ReportManager.get().getReportable("foo").getMetrics();
    assertEquals(1, (long) rpta.getLongMetric("foo"));
    ReportEvent rptb = ReportManager.get().getReportable("bar").getMetrics();
    assertEquals(1, (long) rptb.getLongMetric("bar"));
  }

  @Test
  public void testAgentDFOCollector() throws IOException, FlumeSpecException,
      InterruptedException {
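    // Agent spec: a diskFailover decorator in front of a roll sink wrapping a null sink.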
    String agentCollector = "{diskFailover(1000) => roll (100000) { null } }";
    Event e = new EventImpl("foo".getBytes());
    EventSink agent = FlumeBuilder.buildSink(LogicalNodeContext
        .testingContext(), agentCollector);
    agent.open();
    agent.append(e);

    for (int i = 0; i < 30; i++) {
      Clock.sleep(100);
      ReportEvent r = mem.getReport();
      LOG.info(r);

public class TestHierarchicalReports {

  @Test
  public void testSimple() throws FlumeSpecException {
    String s = "console";
    EventSink sink = FlumeBuilder.buildSink(new Context(), s);

    Map<String, ReportEvent> reports = new HashMap<String, ReportEvent>();
    sink.getReports("X.", reports);
    String r = "";
    for (Entry<String, ReportEvent> e : reports.entrySet()) {
      r += e.getKey() + " = " + e.getValue().toText();
    }
    System.out.println(r);

    Assert.assertTrue(r.contains("X." + sink.getName()));
  }

  @Test
  public void testCollectorSource() throws FlumeSpecException, IOException,
      InterruptedException {
    EventSource src = FlumeBuilder.buildSource(LogicalNodeContext
        .testingContext(), "collectorSource(34568)");
    EventSink snk = FlumeBuilder.buildSink(new Context(),
        "rpcSink(\"localhost\", 34568)");
    src.open();
    snk.open();
    snk.append(new EventImpl("foo".getBytes()));
    src.next();
    snk.close();
    src.close();

  }

      InterruptedException {
    String rpt = "foo";
    String snk = " { ackedWriteAhead(100) => [console,  counter(\"" + rpt
        + "\") ] } ";
    for (int i = 0; i < 100; i++) {
      EventSink es = FlumeBuilder.buildSink(
          LogicalNodeContext.testingContext(), snk);
      es.open();
      es.close();
    }

  }

    int count = 10;
    String rpt = "foo";
    String snk = " { ackedWriteAhead(500) => { ackChecker => [console,  counter(\""
        + rpt + "\") ] } } ";

    EventSink es = FlumeBuilder.buildSink(new ReportTestingContext(
        LogicalNodeContext.testingContext()), snk);

    es.open();
    for (int i = 0; i < count; i++) {
      Event e = new EventImpl(("test message " + i).getBytes());
      System.out.println("initial append: " + e);
      es.append(e);
      Clock.sleep(100);
    }
    Clock.sleep(5000);
    es.close();

    CounterSink ctr = (CounterSink) ReportManager.get().getReportable(rpt);
    Assert.assertEquals(count, ctr.getCount());
  }
