Package org.apache.hadoop.chukwa

Examples of org.apache.hadoop.chukwa.ChunkImpl
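All of these snippets revolve around one constructor, ChunkImpl(dataType, streamName, seqID, data, adaptor). As a minimal standalone sketch of that shape, pieced together from the usages below (the null adaptor and the tag/accessor calls all appear in the examples on this page):

import org.apache.hadoop.chukwa.ChunkImpl;

public class ChunkImplExample {
  public static void main(String[] args) {
    byte[] data = "2008-05-29 10:42:22,723 INFO some log line\n".getBytes();

    // seqID records where this chunk ends in the stream
    ChunkImpl chunk = new ChunkImpl("HadoopLogProcessor", "test",
        data.length, data, null);  // null adaptor, as in the test helpers below
    chunk.addTag("cluster=\"foocluster\"");

    System.out.println(chunk.getDataType() + " / " + chunk.getStreamName()
        + " ending at " + chunk.getSeqID());
  }
}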


  /**
   * Packs the given buffer into a single chunk, tags it with the file
   * timestamp, and hands it to the receiver.
   *
   * @return the number of bytes processed
   * @throws InterruptedException if adding the chunk to the queue is interrupted
   */
  protected int extractRecords(final ChunkReceiver eq, long buffOffsetInFile,
      byte[] buf, long fileTime) throws InterruptedException {
    final ChunkImpl chunk = new ChunkImpl(type, toWatch.getAbsolutePath(),
        buffOffsetInFile + buf.length, buf, this);
    chunk.addTag("time=\"" + fileTime + "\"");
    log.info("Adding " + toWatch.getAbsolutePath() + " to the queue");
    eq.add(chunk);
    log.info( toWatch.getAbsolutePath() + " added to the queue");
    return buf.length;
  }
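The eq parameter only needs to accept chunks. A minimal collecting receiver for exercising extractRecords in a test might look like the following; it assumes ChunkReceiver lives in org.apache.hadoop.chukwa.datacollection and declares just the add(Chunk) method used above:

import java.util.ArrayList;
import java.util.List;

import org.apache.hadoop.chukwa.Chunk;
import org.apache.hadoop.chukwa.datacollection.ChunkReceiver;  // assumed package

/** Buffers chunks in memory so a test can inspect what extractRecords produced. */
class CollectingReceiver implements ChunkReceiver {
  final List<Chunk> chunks = new ArrayList<Chunk>();

  public void add(Chunk event) throws InterruptedException {
    chunks.add(event);  // a real receiver would hand the chunk to the send pipeline
  }
}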


          String stdout = o.getString("stdout");
          data = stdout.getBytes();
        }

        sendOffset += data.length;
        ChunkImpl c = new ChunkImpl(ExecAdaptor.this.type, "results from "
            + cmd, sendOffset, data, ExecAdaptor.this);

        if (SPLIT_LINES) {
          // despite the name, this list holds the offset of each '\n',
          // so every line becomes its own record
          ArrayList<Integer> carriageReturns = new ArrayList<Integer>();
          for (int i = 0; i < data.length; ++i)
            if (data[i] == '\n')
              carriageReturns.add(i);

          c.setRecordOffsets(carriageReturns);
        } // else we get the default: one record spanning the whole chunk


        // We can't replay exec data, so we might as well commit to it now.
        control.reportCommit(ExecAdaptor.this, sendOffset);
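The record offsets set above appear to mark the last byte of each record (the '\n' positions). Under that assumption, a consumer could slice a chunk's payload back into lines; records() here is a hypothetical helper, not part of the ChunkImpl API:

public class RecordSplitter {
  // Split a payload at record offsets, assuming each offset is the index
  // of the last byte of its record.
  static String[] records(byte[] data, int[] offsets) {
    String[] out = new String[offsets.length];
    int start = 0;
    for (int k = 0; k < offsets.length; ++k) {
      out[k] = new String(data, start, offsets[k] - start + 1);
      start = offsets[k] + 1;
    }
    return out;
  }

  public static void main(String[] args) {
    byte[] data = "first\nsecond\n".getBytes();
    for (String rec : records(data, new int[] { 5, 12 }))
      System.out.print(rec);  // prints both lines, newlines included
  }
}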

  protected int extractRecords(ChunkReceiver eq, long buffOffsetInFile,
      byte[] buf) throws InterruptedException {
    if (buf.length == 0)
      return 0;

    ChunkImpl chunk = new ChunkImpl(type, toWatch.getAbsolutePath(),
        buffOffsetInFile + buf.length, buf, this);

    eq.add(chunk);
    return buf.length;
  }

      List<Chunk> events = new LinkedList<Chunk>();
      StringBuilder sb = new StringBuilder();

      for (int i = 0; i < numEvents; i++) {
        ChunkImpl logEvent = ChunkImpl.read(di);
        events.add(logEvent);

        if (FANCY_DIAGNOSTICS) {
          diagnosticPage.sawChunk(logEvent, i);
        }
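ChunkImpl.read(DataInput) pairs with the write(DataOutput) that Hadoop's Writable contract requires; SequenceFile could not store ChunkImpl values otherwise. A round-trip sketch under that assumption:

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.DataInputStream;
import java.io.DataOutputStream;

import org.apache.hadoop.chukwa.ChunkImpl;

public class ChunkRoundTrip {
  public static void main(String[] args) throws Exception {
    // serialize a chunk through its Writable form
    ChunkImpl original =
        new ChunkImpl("Test", "stream", 4L, "data".getBytes(), null);
    ByteArrayOutputStream bos = new ByteArrayOutputStream();
    original.write(new DataOutputStream(bos));

    // read it back with the same static factory the collector uses above
    DataInputStream di =
        new DataInputStream(new ByteArrayInputStream(bos.toByteArray()));
    ChunkImpl copy = ChunkImpl.read(di);
    System.out.println(copy.getStreamName() + " ending at " + copy.getSeqID());
  }
}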

     SequenceFile.Writer seqFileWriter = SequenceFile.createWriter(conf, out,
         ChukwaArchiveKey.class, ChunkImpl.class,
         SequenceFile.CompressionType.NONE, null);
     RandSeqFileWriter rw = new RandSeqFileWriter();
     for (int i = 0; i < chunks; ++i) {
       ChunkImpl chunk = rw.getARandomChunk();
       ChukwaArchiveKey archiveKey = new ChukwaArchiveKey();

       // pin every chunk to the same fixed time partition (May 29, 2008)
       calendar.set(Calendar.YEAR, 2008);
       calendar.set(Calendar.MONTH, Calendar.MAY);
       calendar.set(Calendar.DAY_OF_MONTH, 29);
       calendar.set(Calendar.HOUR, 10);
       calendar.set(Calendar.MINUTE, 0);
       calendar.set(Calendar.SECOND, 0);
       calendar.set(Calendar.MILLISECOND, 0);
       archiveKey.setTimePartition(calendar.getTimeInMillis());
       archiveKey.setDataType(chunk.getDataType());
       archiveKey.setStreamName(chunk.getStreamName());
       archiveKey.setSeqId(chunk.getSeqID());
       seqFileWriter.append(archiveKey, chunk);
     }
     seqFileWriter.close();
     out.close();
   }
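Reading such an archive back is the mirror image. A sketch against the same vintage of the SequenceFile API, with ReflectionUtils standing in for whatever constructor ChunkImpl actually exposes:

import org.apache.hadoop.chukwa.ChukwaArchiveKey;
import org.apache.hadoop.chukwa.ChunkImpl;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.SequenceFile;
import org.apache.hadoop.util.ReflectionUtils;

public class ReadArchive {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    FileSystem fs = FileSystem.getLocal(conf);
    SequenceFile.Reader reader =
        new SequenceFile.Reader(fs, new Path(args[0]), conf);

    ChukwaArchiveKey key = new ChukwaArchiveKey();
    ChunkImpl chunk =
        (ChunkImpl) ReflectionUtils.newInstance(reader.getValueClass(), conf);

    while (reader.next(key, chunk)) {
      System.out.println(key.getDataType() + " " + chunk.getStreamName()
          + " seq=" + chunk.getSeqID());
    }
    reader.close();
  }
}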

       int ms = r.nextInt(1000);
       String line = "2008-05-29 10:42:22," + ms
           + " INFO org.apache.hadoop.dfs.DataNode: Some text goes here"
           + r.nextInt() + "\n";
  
       // seqID marks where this chunk ends in the stream, so advance by line length
       ChunkImpl c = new ChunkImpl("HadoopLogProcessor", "test",
           line.length() + lastSeqID, line.getBytes(), null);
       lastSeqID += line.length();
       c.addTag("cluster=\"foocluster\"");
       return c;
     }

  java.util.Random r = new java.util.Random();

  public ChunkImpl getARandomChunk() {
    int ms = r.nextInt(1000);
    String line = "2008-05-29 10:42:22," + ms
        + " INFO org.apache.hadoop.dfs.DataNode: Some text goes here"
        + r.nextInt() + "\n";

    ChunkImpl c = new ChunkImpl("HadoopLogProcessor", "test",
        line.length() - 1L, line.getBytes(), null);
    return c;
  }

    Calendar calendar = Calendar.getInstance();
    SequenceFile.Writer seqFileWriter = SequenceFile.createWriter(conf, out,
        ChukwaArchiveKey.class, ChunkImpl.class,
        SequenceFile.CompressionType.NONE, null);
    for (int i = 0; i < chunks; ++i) {
      ChunkImpl chunk = getARandomChunk();
      ChukwaArchiveKey archiveKey = new ChukwaArchiveKey();
      // FIXME compute this once an hour
      // truncate the current time to the hour to form the time partition
      calendar.setTimeInMillis(System.currentTimeMillis());
      calendar.set(Calendar.MINUTE, 0);
      calendar.set(Calendar.SECOND, 0);
      calendar.set(Calendar.MILLISECOND, 0);
      archiveKey.setTimePartition(calendar.getTimeInMillis());
      archiveKey.setDataType(chunk.getDataType());
      archiveKey.setStreamName(chunk.getStreamName());
      archiveKey.setSeqId(chunk.getSeqID());
      seqFileWriter.append(archiveKey, chunk);
    }
    seqFileWriter.close();
    out.close();
  }

      diagnosticPage.sawPost(req.getRemoteHost(), numEvents, currentTime);
      for (int i = 0; i < numEvents; i++) {
        // TODO: pass new data to all registered stream handler methods for this chunk's stream
        // TODO: should really have some dynamic assignment of events to writers

        ChunkImpl logEvent = ChunkImpl.read(di);

        if (FANCY_DIAGNOSTICS)
          diagnosticPage.sawChunk(logEvent, i);

        // write new data to the data sink file
        if (writer != null) {
          writer.add(logEvent);  // save() blocks until data is written
          // this is where we ACK this connection
          l_out.print("ok:");
          l_out.print(logEvent.getData().length);
          l_out.print(" bytes ending at offset ");
          l_out.println(logEvent.getSeqID() - 1);
        } else {
          l_out.println("can't write: no writer");
        }
      }
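On the sending side, the ack line printed above is all an agent has to go on. A hypothetical parse of that line, based only on the format the prints produce:

import java.util.regex.Matcher;
import java.util.regex.Pattern;

public class AckParser {
  // matches lines like: ok:37 bytes ending at offset 1023
  private static final Pattern ACK =
      Pattern.compile("ok:(\\d+) bytes ending at offset (\\d+)");

  /** Returns the offset of the last acknowledged byte. */
  public static long endOffset(String line) {
    Matcher m = ACK.matcher(line);
    if (!m.matches())
      throw new IllegalArgumentException("not an ack line: " + line);
    return Long.parseLong(m.group(2));
  }
}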

        for (int i = 0; i < data.length; ++i)
          if (data[i] == '\n')
            carriageReturns.add(i);

        sendOffset += data.length;
        ChunkImpl c = new ChunkImpl(ExecAdaptor.this.type,
            "results from " + cmd, sendOffset, data, ExecAdaptor.this);
        c.setRecordOffsets(carriageReturns);
        dest.add(c);
      } catch (JSONException e) {
        // FIXME: log this somewhere
      } catch (InterruptedException e) {
        // TODO Auto-generated catch block
