Package: com.cloudera.flume.core

Usage examples of com.cloudera.flume.core.EventSink


        String name = argv[0];
        long period = FlumeConfiguration.get().getHistoryDefaultPeriod();
        if (argv.length == 2) {
          period = Integer.parseInt(argv[1]);
        }
        EventSink snk = new CountHistoryReporter(name, period);
        return snk;
      }
    };
  }
View Full Code Here


   * fails twice and then succeeds.
   */
  @Test
  public void testInsistent() throws IOException, InterruptedException {
    // TODO(henry): this test relies on real clocks, and shouldn't. See below.
    EventSink fail2x = mock(EventSink.Base.class);
    // two exceptions then some success
    doThrow(new IOException("mock2")).doThrow(new IOException("mock"))
        .doNothing().when(fail2x).open();
    doReturn(new ReportEvent("stub")).when(fail2x).getMetrics();

    // max 5s, backoff initially at 10ms

    // WARNING! This test relies on being able to sleep for ~10ms and be woken
    // up three times in a 5s period. Seems plausible! But if you are looking at
    // this comment, it's probably because the test is failing on a loaded
    // machine...
    BackoffPolicy bop = new CappedExponentialBackoff(10, 5000);
    InsistentOpenDecorator<EventSink> sink = new InsistentOpenDecorator<EventSink>(
        fail2x, bop);
    sink.open();
    sink.append(new EventImpl("test".getBytes()));
    sink.close();
    fail2x.getMetrics();

    ReportEvent rpt = sink.getMetrics();
    assertEquals(new Long(1), rpt
        .getLongMetric(InsistentOpenDecorator.A_REQUESTS));
    assertEquals(new Long(3), rpt
View Full Code Here

        saves.add(notif);
      }
    };

    String path = "file://" + f + "/%Y-%m-%d/";
    EventSink snk = new HiveNotifyingDfsSink(path, "file-%{host}", "hivetable",
        new AvroJsonOutputFormat(), hfrh);

    snk.open();
    long day_millis = 1000 * 60 * 60 * 24;
    Event e1 = new EventImpl(new byte[0], Clock.unixTime(), Priority.INFO, 0,
        "localhost");
    Event e2 = new EventImpl(new byte[0], e1.getTimestamp() + day_millis,
        Priority.INFO, 0, "localhost");
    Event e3 = new EventImpl(new byte[0], e1.getTimestamp() + 2 * day_millis,
        Priority.INFO, 0, "localhost");
    snk.append(e1);
    snk.append(e2);
    snk.append(e3);
    snk.close();

    FileUtil.rmr(f);

    assertEquals(3, saves.get(0).meta.size()); // 3 date files, but not host.
    assertEquals("hivetable", saves.get(0).table);
View Full Code Here

        Priority.INFO, 0, "localhost");
    Event e3 = new EventImpl(new byte[0], e1.getTimestamp() + 2 * day_millis,
        Priority.INFO, 0, "localhost");

    String path = "file://" + f + "/%Y-%m-%d/";
    EventSink snk = new HiveNotifyingDfsSink(path, "file-%{host}", "hivetable",
        new AvroJsonOutputFormat(), hfrh);

    snk.open();
    snk.append(e1);
    snk.append(e2);
    snk.append(e3);
    snk.close();

    // Simulate a roll by send the messages a second time sink using the same
    // HiveNewDirNotification handlers.

    // TODO (jon) fix this sink's open close has a problem. here just
    // instantiating a new one
    snk = new HiveNotifyingDfsSink(path, "file-%{host}", "hivetable",
        new AvroJsonOutputFormat(), hfrh);
    snk.open();
    snk.append(e1);
    snk.append(e2);
    snk.append(e3);
    snk.close();

    FileUtil.rmr(f);

    assertEquals(3, saves.get(0).meta.size()); // 3 date files, but not host.
    assertEquals("hivetable", saves.get(0).table);
View Full Code Here

    LOG.info("filename before escaping: " + f.getAbsolutePath());
    String custom = "text(\""
        + StringEscapeUtils.escapeJava(f.getAbsolutePath())
        + "\", \"avrodata\")";
    LOG.info("sink to parse: " + custom);
    EventSink snk = FlumeBuilder.buildSink(new Context(), custom);
    snk.open();
    mem.open();
    EventUtil.dumpAll(mem, snk);
    snk.close();

    mem.open();
    DatumReader<EventImpl> dtm = new ReflectDatumReader<EventImpl>(
        EventImpl.class);
    DataFileReader<EventImpl> dr = new DataFileReader<EventImpl>(f, dtm);
View Full Code Here

  @Test
  public void testStubbornAppendMetrics() throws JSONException,
      FlumeSpecException, IOException, InterruptedException {
    ReportTestUtils.setupSinkFactory();

    EventSink snk = FlumeBuilder.buildSink(new ReportTestingContext(),
        "ackChecker one");
    ReportEvent rpt = ReportUtil.getFlattenedReport(snk);
    LOG.info(ReportUtil.toJSONObject(rpt).toString());
    assertNotNull(rpt.getLongMetric(AckChecksumChecker.A_ACK_ENDS));
    assertNotNull(rpt.getLongMetric(AckChecksumChecker.A_ACK_FAILS));
View Full Code Here

      // this is wraps the normal roll sink with an extra roll detection
      // decorator that triggers ack delivery on close.
      @Override
      public EventSink newSink(Context ctx) throws IOException {
        String tag = tagger.newTag();
        EventSink drain;
        try {
          drain = new CompositeSink(ctx, snkSpec);
        } catch (FlumeSpecException e) {
          throw new IOException("Unable to instantiate '" + snkSpec + "'", e);
        }
        return new RollDetectDeco(drain, tag);
      }
    };

    long initMs = FlumeConfiguration.get().getInsistentOpenInitBackoff();
    long cumulativeMaxMs = FlumeConfiguration.get()
        .getFailoverMaxCumulativeBackoff();
    long maxMs = FlumeConfiguration.get().getFailoverMaxSingleBackoff();
    BackoffPolicy backoff1 = new CumulativeCappedExponentialBackoff(initMs,
        maxMs, cumulativeMaxMs);
    BackoffPolicy backoff2 = new CumulativeCappedExponentialBackoff(initMs,
        maxMs, cumulativeMaxMs);

    // the collector snk has ack checking logic, retry and reopen logic, and
    // needs an extra mask before rolling, writing to disk and forwarding acks
    // (roll detect).

    // { ackChecksumChecker => insistentAppend => stubbornAppend =>
    // insistentOpen => mask("rolltag") => roll(xx) { rollDetect =>
    // subsink } }
    EventSink tmp = new MaskDecorator<EventSink>(roller, "rolltag");
    tmp = new InsistentOpenDecorator<EventSink>(tmp, backoff1);
    tmp = new StubbornAppendSink<EventSink>(tmp);
    tmp = new InsistentAppendDecorator<EventSink>(tmp, backoff2);
    snk = new AckChecksumChecker<EventSink>(tmp, accum);
  }
View Full Code Here

        long millis = FlumeConfiguration.get().getCollectorRollMillis();
        if (argv.length >= 2) {
          millis = Long.parseLong(argv[1]);
        }
        try {
          EventSink deco = new CollectorSink(context, snkSpec, millis,
              FlumeNode.getInstance().getCollectorAckListener());
          return deco;
        } catch (FlumeSpecException e) {
          LOG.error("CollectorDecorator spec error " + e, e);
          throw new IllegalArgumentException(
View Full Code Here

        }
        if (argv.length >= 3) {
          millis = Long.parseLong(argv[2]);
        }
        try {
          EventSink snk = new CollectorSink(context, logdir, prefix, millis,
              new ProcessTagger(), 250, FlumeNode.getInstance()
                  .getCollectorAckListener());
          return snk;
        } catch (FlumeSpecException e) {
          LOG.error("CollectorSink spec error " + e, e);
View Full Code Here

  @Test
  public void testStubborn() throws IOException, InterruptedException {
    // just using as an int reference
    final AtomicInteger ok = new AtomicInteger();

    EventSink failAppend = new EventSink.Base() {
      int n = 4; // fail every nth append
      int count = 0;

      @Override
      public void append(Event e) throws IOException {
View Full Code Here

TOP

Related Classes of com.cloudera.flume.core.EventSink

Copyright © 2018 www.massapi.com. All rights reserved.
All source code is the property of its respective owners. Java is a trademark of Sun Microsystems, Inc., which is owned by Oracle Inc. Contact coftware#gmail.com.