Package org.apache.hadoop.chukwa

Examples of org.apache.hadoop.chukwa.ChunkImpl
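
ChunkImpl is the concrete implementation of Chukwa's Chunk interface: a tagged block of bytes that carries a data type, a stream name, a sequence ID, and the payload itself. The excerpts below, drawn from Chukwa's tests and tools, show the class constructed directly, rebuilt from sequence files via ChunkImpl.getBlankChunk(), and deserialized from a socket via ChunkImpl.read(). As a primer, here is a minimal sketch of direct construction; the argument order (dataType, streamName, seqID, data, adaptor) and the null adaptor follow the usage in the excerpts, while the class name ChunkImplDemo and the payload are illustrative only.

    import org.apache.hadoop.chukwa.Chunk;
    import org.apache.hadoop.chukwa.ChunkImpl;

    public class ChunkImplDemo {
      public static void main(String[] args) {
        byte[] payload = "hello chukwa".getBytes();
        // (dataType, streamName, seqID, data, adaptor) -- the tests below pass null for the adaptor
        Chunk chunk = new ChunkImpl("DemoType", "demo-stream", payload.length, payload, null);

        System.out.println("DataType : " + chunk.getDataType());
        System.out.println("Source   : " + chunk.getSource());
        System.out.println("SeqID    : " + chunk.getSeqID());
        System.out.println("Data     : " + new String(chunk.getData()));
      }
    }

The first excerpt reads a Chukwa sequence file and renders its first entry as a string, pairing the ChukwaArchiveKey fields with the corresponding ChunkImpl fields: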


    SequenceFile.Reader reader = null;
    try {
      reader = new SequenceFile.Reader(fs, new Path(file), conf);

      ChukwaArchiveKey key = new ChukwaArchiveKey();
      ChunkImpl chunk = ChunkImpl.getBlankChunk();

      StringBuilder sb = new StringBuilder();
      while (reader.next(key, chunk)) {
        sb.append("\nTimePartition: " + key.getTimePartition());
        sb.append("DataType: " + key.getDataType());
        sb.append("StreamName: " + key.getStreamName());
        sb.append("SeqId: " + key.getSeqId());
        sb.append("\t\t =============== ");

        sb.append("Cluster : " + chunk.getTags());
        sb.append("DataType : " + chunk.getDataType());
        sb.append("Source : " + chunk.getSource());
        sb.append("Application : " + chunk.getStreamName());
        sb.append("SeqID : " + chunk.getSeqID());
        sb.append("Data : " + new String(chunk.getData()));
        return sb.toString();
      }
    } catch (Throwable e) {
      Assert.fail("Exception while reading SeqFile: " + e.getMessage());
      throw e; // unreachable in practice: Assert.fail throws AssertionError
    } finally {
      // the original listing is truncated here; reader cleanup of this shape is implied
      if (reader != null)
        reader.close();
    }
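From a test of HBaseWriter: a ChunkImpl tagged with the TextParser data type is queued, the demux processor mappings are set on the collector configuration, and a writer is pointed at a local ZooKeeper quorum: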


   
 
  public void testWriters() {
    ArrayList<Chunk> chunks = new ArrayList<Chunk>();
    chunks.add(new ChunkImpl("TextParser", "name", timestamp, test, null));     
    try {     
      cc.set("hbase.demux.package", "org.apache.chukwa.datacollection.writer.test.demux");
      cc.set("TextParser","org.apache.hadoop.chukwa.datacollection.writer.test.demux.TextParser");
      conf.set(HConstants.ZOOKEEPER_QUORUM, "127.0.0.1");
      hbw = new HBaseWriter(cc, conf);
      // ... the remainder of the test is elided in the original listing; minimal closure added
    } catch (Exception e) {
      Assert.fail(e.getMessage());
    }
  }
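This sanity check reads a sink file back, confirms that both keys and values are Writable, and prints up to five events, truncating any payload longer than 1000 bytes: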

      SequenceFile.Reader r = new SequenceFile.Reader(fs, new Path(fname), conf);
      System.out.println("key class name is " + r.getKeyClassName());
      System.out.println("value class name is " + r.getValueClassName());

      ChukwaArchiveKey key = new ChukwaArchiveKey();
      ChunkImpl evt = ChunkImpl.getBlankChunk();
      int events = 0;
      while (r.next(key, evt) && (events < 5)) {
        if (!Writable.class.isAssignableFrom(key.getClass()))
          System.out.println("warning: keys aren't writable");

        if (!Writable.class.isAssignableFrom(evt.getClass()))
          System.out.println("warning: values aren't writable");

        if (evt.getData().length > 1000) {
          System.out.println("got event; data: "
              + new String(evt.getData(), 0, 1000));
          System.out.println("....[truncating]");
        } else
          System.out.println("got event; data: " + new String(evt.getData()));
        events++;
      }
      System.out.println("file looks OK!");
    } catch (Exception e) {
      e.printStackTrace();
      // the original listing is truncated here
    }
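A Demux mapper test: a ChunkBuilder assembles a ChunkImpl from a sample record, and the chunk is fed through the mapper into a ChukwaTestOutputCollector: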

             "org.apache.hadoop.chukwa.extraction.demux.processor.mapper.MockMapProcessor");
    mapper.configure(conf);

    ChunkBuilder cb = new ChunkBuilder();
    cb.addRecord(SAMPLE_RECORD_DATA.getBytes());
    ChunkImpl chunk = (ChunkImpl)cb.getChunk();

    ChukwaTestOutputCollector<ChukwaRecordKey, ChukwaRecord> output =
            new ChukwaTestOutputCollector<ChukwaRecordKey, ChukwaRecord>();

    mapper.map(new ChukwaArchiveKey(), chunk, output, Reporter.NULL);
    // ... the remainder of the test is elided in the original listing
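Here each chunk read from a sequence file on the local filesystem is wrapped in a ByteRange and collected: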

          continue;  // (the enclosing loop over file statuses is elided in the original listing)
        SequenceFile.Reader reader = new SequenceFile.Reader(localfs, fstatus.getPath(), conf);

        ChukwaArchiveKey key = new ChukwaArchiveKey();
        ChunkImpl chunk = ChunkImpl.getBlankChunk();

        while (reader.next(key, chunk)) {
          bytes.add(new ByteRange(chunk));
        }
        reader.close();
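This fragment (the reader declaration precedes the excerpt) dumps a data sink file to a companion .dump text file, asserting the cluster, data type, and source of every chunk and remembering the last sequence ID seen: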

    BufferedWriter out = null;
    try {
     
      reader = new SequenceFile.Reader(fs, new Path(dataSinkFile), conf);
      ChukwaArchiveKey key = new ChukwaArchiveKey();
      ChunkImpl chunk = ChunkImpl.getBlankChunk();

      String dataSinkDumpName = dataSinkFile + ".dump";
      out = new BufferedWriter(new FileWriter(dataSinkDumpName));

      while (reader.next(key, chunk)) {
        Assert.assertTrue(cluster.equals(RecordUtil.getClusterName(chunk)));
        Assert.assertTrue(dataType.equals(chunk.getDataType()));
        Assert.assertTrue(source.equals(chunk.getSource()));
       
        out.write(new String(chunk.getData()));
        lastSeqId = chunk.getSeqID();
      }
     
      out.close();
      out = null;
      reader.close();
      // the original listing is truncated here; the out = null bookkeeping above
      // implies cleanup of this shape in a finally block
    } finally {
      if (out != null)
        out.close();
    }
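From a SocketTeeWriter test: chunks pushed through a PipelineStageWriter are captured by a CaptureWriter, while socket clients register WRITABLE and RAW filters and read back only the chunks whose data type or content matches: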

    PipelineStageWriter psw = new PipelineStageWriter();
    psw.init(conf);

    System.out.println("pipeline established; now pushing a chunk");
    ArrayList<Chunk> l = new ArrayList<Chunk>();
    l.add(new ChunkImpl("dt", "name", 1, new byte[] {'a'}, null));
    psw.add(l);
    //push a chunk through. It should get written, but the socket tee shouldn't do anything.
    assertEquals(1, CaptureWriter.outputs.size());
    //now connect and set up a filter.
   
    System.out.println("connecting to localhost");
    Socket s = new Socket("localhost", SocketTeeWriter.DEFAULT_PORT);
//   s.setSoTimeout(2000);
    DataOutputStream dos = new DataOutputStream (s.getOutputStream());
    dos.write((SocketTeeWriter.WRITABLE + " datatype=dt3\n").getBytes());
    DataInputStream dis = new DataInputStream(s.getInputStream());

    System.out.println("command send");

    dis.readFully(new byte[3]);
    //push a chunk not matching filter -- nothing should happen.
    l = new ArrayList<Chunk>();
    l.add(new ChunkImpl("dt2", "name", 1, new byte[] {'b'}, null));
    psw.add(l);
    assertEquals(2, CaptureWriter.outputs.size());

    System.out.println("sent nonmatching chunk");

    //and now one that does match -- data should be available to read off the socket

    l = new ArrayList<Chunk>();
    l.add(new ChunkImpl("dt3", "name", 1, new byte[] {'c'}, null));
    psw.add(l);
    assertEquals(3, CaptureWriter.outputs.size());

    System.out.println("sent matching chunk");
   
    System.out.println("reading...");
    ChunkImpl chunk = ChunkImpl.read(dis);
    assertTrue(chunk.getDataType().equals("dt3"));
    System.out.println(chunk);

    dis.close();
    dos.close();
    s.close();
   
    Socket s2 = new Socket("localhost", SocketTeeWriter.DEFAULT_PORT);
    s2.getOutputStream().write((SocketTeeWriter.RAW+" content=.*d.*\n").getBytes());
    dis = new DataInputStream(s2.getInputStream());
    dis.readFully(new byte[3]); //read "OK\n"
    l = new ArrayList<Chunk>();
    l.add(new ChunkImpl("dt3", "name", 1, new byte[] {'d'}, null));
    psw.add(l);
    assertEquals(4, CaptureWriter.outputs.size());

    int len = dis.readInt();
    assertTrue(len == 1);
    byte[] data = new byte[100];
    int read = dis.read(data);
    assertTrue(read == 1);
    assertTrue(data[0] == 'd');
   
    s2.close();
    dis.close();
   
    l = new ArrayList<Chunk>();
    l.add(new ChunkImpl("dt3", "name", 3, new byte[] {'c', 'a', 'd'}, null));
    psw.add(l);
    assertEquals(5, CaptureWriter.outputs.size());
//    Thread.sleep(1000);
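A variant of the first excerpt; the only difference is that the application field comes from chunk.getApplication() rather than chunk.getStreamName():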

    SequenceFile.Reader reader = null;
    try {
      reader = new SequenceFile.Reader(fs, new Path(file), conf);

      ChukwaArchiveKey key = new ChukwaArchiveKey();
      ChunkImpl chunk = ChunkImpl.getBlankChunk();

      StringBuilder sb = new StringBuilder();
      while (reader.next(key, chunk)) {
        sb.append("\nTimePartition: " + key.getTimePartition());
        sb.append("DataType: " + key.getDataType());
        sb.append("StreamName: " + key.getStreamName());
        sb.append("SeqId: " + key.getSeqId());
        sb.append("\t\t =============== ");

        sb.append("Cluster : " + chunk.getTags());
        sb.append("DataType : " + chunk.getDataType());
        sb.append("Source : " + chunk.getSource());
        sb.append("Application : " + chunk.getApplication());
        sb.append("SeqID : " + chunk.getSeqID());
        sb.append("Data : " + new String(chunk.getData()));
        return sb.toString();
      }
    } catch (Throwable e) {
      Assert.fail("Exception while reading SeqFile: " + e.getMessage());
      throw e; // unreachable in practice: Assert.fail throws AssertionError
    } finally {
      // the original listing is truncated here; reader cleanup of this shape is implied
      if (reader != null)
        reader.close();
    }
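A small dump utility: it tallies entries per source:dataType:application key and, unless a summary was requested, prints every field of the key and the chunk: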

  private static void dumpFile(Path p, Configuration conf,
      FileSystem fs) throws IOException {
    SequenceFile.Reader r = new SequenceFile.Reader(fs, p, conf);

    ChukwaArchiveKey key = new ChukwaArchiveKey();
    ChunkImpl chunk = ChunkImpl.getBlankChunk();
    try {
      while (r.next(key, chunk)) {
       
        String entryKey = chunk.getSource() + ":" + chunk.getDataType() + ":"
            + chunk.getApplication();
       
        Integer oldC = counts.get(entryKey);
        if (oldC != null)
          counts.put(entryKey, oldC + 1);
        else
          counts.put(entryKey, 1);
       
        if(!summarize) {
          System.out.println("\nTimePartition: " + key.getTimePartition());
          System.out.println("DataType: " + key.getDataType());
          System.out.println("StreamName: " + key.getStreamName());
          System.out.println("SeqId: " + key.getSeqId());
          System.out.println("\t\t =============== ");
 
          System.out.println("Cluster : " + chunk.getTags());
          System.out.println("DataType : " + chunk.getDataType());
          System.out.println("Source : " + chunk.getSource());
          System.out.println("Application : " + chunk.getApplication());
          System.out.println("SeqID : " + chunk.getSeqID());
          System.out.println("Data : " + new String(chunk.getData()));
        }
      }
    } catch (Exception e) {
      e.printStackTrace();
    } finally {
      // close the reader before returning (the tail of this method is cut off in the original listing)
      r.close();
    }
  }
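Finally, a synthetic data source: the loop fills a buffer with random bytes and wraps it in a ChunkImpl, using the running byte offset as the sequence ID, before handing it to the destination queue: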

    try {
      while (!stopping) {
        byte[] data = new byte[BUFFER_SIZE];
        r.nextBytes(data);
        offset += data.length;
        ChunkImpl evt = new ChunkImpl(type, "random data source", offset, data,
            this);
        dest.add(evt);

      }
    } catch (InterruptedException ie) {
      // interruption ends the generator loop; the handler body is elided in the original listing
    }
