Package org.apache.hadoop.chukwa

Examples of org.apache.hadoop.chukwa.ChunkImpl
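
ChunkImpl is the concrete implementation of Chukwa's Chunk interface: a typed, tagged byte buffer identified by a data type, a stream name, and a sequence ID (the stream offset of the end of the chunk), optionally carrying a reference to the adaptor that produced it. Before the examples, a minimal sketch of a construct/serialize/deserialize round trip; it assumes ChunkImpl implements Hadoop's Writable (consistent with its use as a SequenceFile value and with ChunkImpl.read(...) below), and the data type, stream name, and tag values are illustrative only:

    import java.io.*;
    import org.apache.hadoop.chukwa.ChunkImpl;

    public class ChunkRoundTrip {
      public static void main(String[] args) throws IOException {
        byte[] payload = "a log line\n".getBytes();
        // (dataType, streamName, seqID, data, initiator); seqID is the
        // offset of the last byte of this chunk within its stream
        ChunkImpl out = new ChunkImpl("SysLog", "/var/log/syslog",
            payload.length, payload, null);
        out.addTag("cluster=\"demo\"");        // tags use key="value" syntax

        // round trip through a byte stream, as a collector or socket
        // peer would see it
        ByteArrayOutputStream bos = new ByteArrayOutputStream();
        out.write(new DataOutputStream(bos));  // Writable.write(DataOutput)
        ChunkImpl in = ChunkImpl.read(new DataInputStream(
            new ByteArrayInputStream(bos.toByteArray())));
        System.out.println(in.getDataType() + " / " + in.getStreamName());
      }
    }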


    JSONObject json = getJSONObject();
    json.put("packetsSent", "2049");
    json.put("NodeCount", "40");
    byte[] data = json.toString().getBytes();
    ZookeeperProcessor p = new ZookeeperProcessor();
    ChunkImpl ch = new ChunkImpl("TestType", "Test", data.length, data,
        null);
    String failMsg = testProcessor(p, json, ch);
    assertNull(failMsg, failMsg);

    // test gauge metric
    json.put("packetsSent", "2122");
    data = json.toString().getBytes();
    ch = new ChunkImpl("TestType", "Test", data.length, data, null);
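    // a gauge is reported as the delta between successive readings, so the
    // expected packetsSent value is 2122 - 2049 = 73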
    json.put("packetsSent", "73");
    failMsg = testProcessor(p, json, ch);
    assertNull(failMsg, failMsg);
  }


    try {
      while (!stopping) {
        byte[] data = new byte[BUFFER_SIZE];
        r.nextBytes(data);
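        // the sequence ID passed to ChunkImpl below is the stream offset
        // of the end of this chunk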
        offset += data.length;
        ChunkImpl evt = new ChunkImpl(type, "random data source", offset, data,
            this);
        dest.add(evt);

      }
    } catch (InterruptedException ie) {

      List<Chunk> events = new LinkedList<Chunk>();
      StringBuilder sb = new StringBuilder();

      for (int i = 0; i < numEvents; i++) {
        ChunkImpl logEvent = ChunkImpl.read(di);
        events.add(logEvent);

        if (FANCY_DIAGNOSTICS) {
          diagnosticPage.sawChunk(logEvent, i);
        }

     // for each line, create a chunk and an archive key, pass it to the
     // processor, then write it to the sequence file.
     while ((line = reader.readLine()) != null) {

       ChunkImpl chunk = new ChunkImpl(dataType, streamName,
         line.length() + lastSeqID, line.getBytes(), null);
       lastSeqID += line.length();
       chunk.addTag("cluster=\"" + clusterName + "\"");

       ChukwaArchiveKey archiveKey = new ChukwaArchiveKey();
       archiveKey.setTimePartition(System.currentTimeMillis());
       archiveKey.setDataType(chunk.getDataType());
       archiveKey.setStreamName(chunk.getStreamName());
       archiveKey.setSeqId(chunk.getSeqID());

       processor.process(archiveKey, chunk, collector, null);
       seqFileWriter.append(collector.getChukwaRecordKey(),
                            collector.getChukwaRecord());
     }

    /**
     * Deduplicating reduce: emits the first chunk seen for each archive key
     * and counts any duplicates.
     */
    @Override
    public void reduce(ChukwaArchiveKey key, Iterator<ChunkImpl> vals,
        OutputCollector<ChukwaArchiveKey, ChunkImpl> out, Reporter r)
        throws IOException {
      ChunkImpl i = vals.next();
      out.collect(key, i);
      int dups = 0;
      while(vals.hasNext()) {
        vals.next();
        dups ++;

  /**
   * Wraps the given buffer in a single chunk, tags it with the file time,
   * and hands it to the receiver.
   *
   * @return the number of bytes processed
   * @throws InterruptedException if adding to the receiver queue is interrupted
   */
  protected int extractRecords(final ChunkReceiver eq, long buffOffsetInFile,
      byte[] buf, long fileTime) throws InterruptedException {
    final ChunkImpl chunk = new ChunkImpl(type, toWatch.getAbsolutePath(),
        buffOffsetInFile + buf.length, buf, this);
    chunk.addTag("time=\"" + fileTime + "\"");
    log.info("Adding " + toWatch.getAbsolutePath() + " to the queue");
    eq.add(chunk);
    log.info( toWatch.getAbsolutePath() + " added to the queue");
    return buf.length;
  }

          String stdout = o.getString("stdout");
          data = stdout.getBytes();
        }

        sendOffset += data.length;
        ChunkImpl c = new ChunkImpl(ExecAdaptor.this.type, "results from "
            + cmd, sendOffset, data, ExecAdaptor.this);

        if (SPLIT_LINES) {
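          // record the offset of every '\n' so the chunk is split into
          // one record per line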
          ArrayList<Integer> carriageReturns = new ArrayList<Integer>();
          for (int i = 0; i < data.length; ++i)
            if (data[i] == '\n')
              carriageReturns.add(i);

          c.setRecordOffsets(carriageReturns);
        } // else we get default one record


        //We can't replay exec data, so we might as well commit to it now.
        control.reportCommit(ExecAdaptor.this, sendOffset);
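
The following test exercises SocketTeeWriter together with PipelineStageWriter: each chunk added to the pipeline is counted by CaptureWriter, and a socket client that sends a one-line command (WRITABLE, RAW, or ASCII_HEADER plus a filter) and reads back the three-byte "OK\n" acknowledgment then receives every chunk matching its filter: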

    PipelineStageWriter psw = new PipelineStageWriter();
    psw.init(conf);

    System.out.println("pipeline established; now pushing a chunk");
    ArrayList<Chunk> l = new ArrayList<Chunk>();
    l.add(new ChunkImpl("dt", "name", 1, new byte[] {'a'}, null));
    psw.add(l);
    //push a chunk through. It should get written, but the socket tee shouldn't do anything.
    assertEquals(1, CaptureWriter.outputs.size());
    //now connect and set up a filter.
   
    System.out.println("connecting to localhost");
    Socket s = new Socket("localhost", SocketTeeWriter.DEFAULT_PORT);
//   s.setSoTimeout(2000);
    DataOutputStream dos = new DataOutputStream (s.getOutputStream());
    dos.write((SocketTeeWriter.WRITABLE + " datatype=dt3\n").getBytes());
    DataInputStream dis = new DataInputStream(s.getInputStream());

    System.out.println("command send");

    dis.readFully(new byte[3]);
    //push a chunk not matching filter -- nothing should happen.
    l = new ArrayList<Chunk>();
    l.add(new ChunkImpl("dt2", "name", 1, new byte[] {'b'}, null));
    psw.add(l);
    assertEquals(2, CaptureWriter.outputs.size());

    System.out.println("sent nonmatching chunk");

    //and now one that does match -- data should be available to read off the socket

    l = new ArrayList<Chunk>();
    l.add(new ChunkImpl("dt3", "name", 1, new byte[] {'c'}, null));
    psw.add(l);
    assertEquals(3, CaptureWriter.outputs.size());

    System.out.println("sent matching chunk");
   
    System.out.println("reading...");
    ChunkImpl chunk = ChunkImpl.read(dis);
    assertTrue(chunk.getDataType().equals("dt3"));
    System.out.println(chunk);

    dis.close();
    dos.close();
    s.close();
   
    Socket s2 = new Socket("localhost", SocketTeeWriter.DEFAULT_PORT);
    s2.getOutputStream().write((SocketTeeWriter.RAW+" content=.*d.*\n").getBytes());
    dis = new DataInputStream(s2.getInputStream());
    dis.readFully(new byte[3]); //read "OK\n"
    l = new ArrayList<Chunk>();
    l.add(new ChunkImpl("dt3", "name", 1, new byte[] {'d'}, null));
    psw.add(l);
    assertEquals(4, CaptureWriter.outputs.size());

    int len = dis.readInt();
    assertTrue(len == 1);
    byte[] data = new byte[100];
    int read = dis.read(data);
    assertTrue(read == 1);
    assertTrue(data[0] == 'd');
   
    s2.close();
    dis.close();
   
    l = new ArrayList<Chunk>();
    l.add(new ChunkImpl("dt3", "name", 3, new byte[] {'c', 'a', 'd'}, null));
    psw.add(l);
    assertEquals(5, CaptureWriter.outputs.size());
   
   
    Socket s3 = new Socket("localhost", SocketTeeWriter.DEFAULT_PORT);
    s3.getOutputStream().write((SocketTeeWriter.ASCII_HEADER+" all\n").getBytes());
    dis = new DataInputStream(s3.getInputStream());
    dis.readFully(new byte[3]); //read "OK\n"
    l = new ArrayList<Chunk>();
    chunk = new ChunkImpl("dataTypeFoo", "streamName", 4, new byte[] {'t','e','x','t'}, null);
    chunk.setSource("hostNameFoo");
    l.add(chunk);
    psw.add(l);
    assertEquals(6, CaptureWriter.outputs.size());
    len = dis.readInt();
    data = new byte[len];

    SocketDataLoader sdl = new SocketDataLoader("all");
   
    System.out.println("pipeline established; now pushing a chunk");
    ArrayList<Chunk> l = new ArrayList<Chunk>();
    l.add(new ChunkImpl("dt", "name", 1, new byte[] {'a'}, null));
    psw.add(l);
    //push a chunk through. SocketDataLoader should receive this chunk.
   
    try {
      Collection<Chunk> clist = sdl.read();

    SequenceFile.Reader reader = null;
    try {
      reader = new SequenceFile.Reader(fs, new Path(file), conf);

      ChukwaArchiveKey key = new ChukwaArchiveKey();
      // blank, reusable instances; reader.next(key, chunk) fills them in
      ChunkImpl chunk = ChunkImpl.getBlankChunk();

      StringBuilder sb = new StringBuilder();
      while (reader.next(key, chunk)) {
        sb.append("\nTimePartition: " + key.getTimePartition());
        sb.append("DataType: " + key.getDataType());
        sb.append("StreamName: " + key.getStreamName());
        sb.append("SeqId: " + key.getSeqId());
        sb.append("\t\t =============== ");

        sb.append("Cluster : " + chunk.getTags());
        sb.append("DataType : " + chunk.getDataType());
        sb.append("Source : " + chunk.getSource());
        sb.append("Application : " + chunk.getStreamName());
        sb.append("SeqID : " + chunk.getSeqID());
        sb.append("Data : " + new String(chunk.getData()));
        return sb.toString();
      }
    } catch (Throwable e) {
     Assert.fail("Exception while reading SeqFile"+ e.getMessage());
     throw e;