Package org.apache.hadoop.chukwa

Examples of org.apache.hadoop.chukwa.ChunkImpl
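The snippets below, collected from the Chukwa code base and its tests, illustrate both halves of a ChunkImpl's life cycle: constructing chunks (in adaptors, loaders, and test helpers) and serializing them to and from Hadoop SequenceFiles keyed by ChukwaArchiveKey.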


 
      SequenceFile.Writer seqFileWriter = SequenceFile.createWriter(conf, out,
          ChukwaArchiveKey.class, ChunkImpl.class,
          SequenceFile.CompressionType.NONE, null);

      ChunkImpl chunk;
      while ((chunk = getNextChunkFromStdin(stdin)) != null) {
        ChukwaArchiveKey archiveKey = new ChukwaArchiveKey();

        // Key each chunk by the current time partition plus its own metadata.
        archiveKey.setTimePartition(calendar.getTimeInMillis());
        archiveKey.setDataType(chunk.getDataType());
        archiveKey.setStreamName(chunk.getStreamName());
        archiveKey.setSeqId(chunk.getSeqID());
        seqFileWriter.append(archiveKey, chunk);
      }
      seqFileWriter.close();
      out.close();
    } catch (Exception e) {
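
This fragment reads chunks from standard input and archives each one, keying it by a time partition plus the chunk's own metadata. It relies on conf, out, calendar, stdin, and a getNextChunkFromStdin helper from its enclosing class. A minimal sketch of the setup it assumes (names and types here are assumptions, not taken from the original source):

    Configuration conf = new Configuration();
    FileSystem fs = FileSystem.getLocal(conf);               // assumed: local FS for illustration
    FSDataOutputStream out = fs.create(new Path("chunks.seq"));
    Calendar calendar = Calendar.getInstance();              // supplies the time partition
    DataInputStream stdin = new DataInputStream(System.in);  // assumed input type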


    if (sb.length() < 1)
      return null;

    String lines = sb.toString();

    ChunkImpl c = new ChunkImpl("XTrace", "XtrLoader",
        lines.length() + lastSeqID, lines.getBytes(), null);
    lastSeqID += lines.length();
    c.addTag("cluster=\"beth_xtrace\"");

    return c;
  }
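
Here an XTrace loader wraps a batch of buffered lines in a single chunk. Note the sequence-ID bookkeeping: the chunk's seqID is the byte offset at which its payload ends (lastSeqID + lines.length()), and lastSeqID then advances by the payload length, so a consumer can detect dropped or duplicated data. The cluster tag is attached before the chunk is returned.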

    {
      Text t;
      BytesWritable bw;

      if (k instanceof ChukwaArchiveKey && v instanceof ChunkImpl) {
        ChunkImpl value = (ChunkImpl) v;
        Report xtrReport = Report.createFromString(new String(value.getData()));
        bw = new BytesWritable(xtrReport.getMetadata().getTaskId().get());
        // FIXME: can probably optimize the above lines by doing a search in the raw bytes
        t = new Text(value.getData());
      } else if (k instanceof ChukwaRecordKey && v instanceof ChukwaRecord) {
        ChukwaRecord value = (ChukwaRecord) v;
        Report xtrReport = Report.createFromString(value.getValue(Record.bodyField));
        bw = new BytesWritable(xtrReport.getMetadata().getTaskId().get());
        // FIXME: can probably optimize the above lines by doing a search in the raw bytes
        t = new Text(value.getValue(Record.bodyField));
      } else {
        log.error("unexpected key/value types: " + k.getClass().getCanonicalName()
            + " and " + v.getClass().getCanonicalName());
        return;
      }
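
This map-style fragment accepts either representation of the same data: raw archive pairs (ChukwaArchiveKey/ChunkImpl), where the XTrace report is parsed straight from the chunk bytes, or demuxed pairs (ChukwaRecordKey/ChukwaRecord), where the report text lives in the record's body field. In both branches the output key becomes the report's task ID and the output value the report text; any other key/value combination is logged and skipped.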

        }
        log.debug("flush out: " + output);
        byte[] data = output.toString().getBytes();
        sendOffset += data.length;
        ChunkImpl c = new ChunkImpl(source.getType(), "results from "
            + adaptorParams, sendOffset, data, source);
        try {
          log.debug("add one chunk");
          // control.reportCommit(ExecAdaptor.this, sendOffset);
          dest.add(c);
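
In ExecAdaptor, the buffered output of a periodically executed command is flushed as one chunk. sendOffset accumulates the total bytes emitted and doubles as the chunk's sequence ID, and passing source as the final constructor argument ties the chunk back to the adaptor that produced it, so the agent can track progress per adaptor.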

    int ms = r.nextInt(1000);
    String line = "2008-05-29 10:42:22," + ms
        + " INFO org.apache.hadoop.dfs.DataNode: Some text goes here"
        + r.nextInt() + "\n";

    ChunkImpl c = new ChunkImpl("HadoopLogProcessor", "test",
        line.length() - 1L, line.getBytes(), null);
    return c;
  }
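
A test helper that fabricates a plausible DataNode log line with randomized content. The seqID is line.length() - 1, apparently the offset of the line's final byte, and the null source marks the chunk as synthetic: no adaptor produced it.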

    Calendar calendar = Calendar.getInstance();
    SequenceFile.Writer seqFileWriter = SequenceFile.createWriter(conf, out,
        ChukwaArchiveKey.class, ChunkImpl.class,
        SequenceFile.CompressionType.NONE, null);
    for (int i = 0; i < chunks; ++i) {
      ChunkImpl chunk = getARandomChunk();
      ChukwaArchiveKey archiveKey = new ChukwaArchiveKey();
      // FIXME compute this once an hour
      calendar.setTimeInMillis(System.currentTimeMillis());
      calendar.set(Calendar.MINUTE, 0);
      calendar.set(Calendar.SECOND, 0);
      calendar.set(Calendar.MILLISECOND, 0);
      archiveKey.setTimePartition(calendar.getTimeInMillis());
      archiveKey.setDataType(chunk.getDataType());
      archiveKey.setStreamName(chunk.getStreamName());
      archiveKey.setSeqId(chunk.getSeqID());
      seqFileWriter.append(archiveKey, chunk);
    }
    seqFileWriter.close();
    out.close();
  }
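
This writer fills a SequenceFile with a given number of random chunks. Unlike the stdin importer above, it rounds the key's time partition down to the hour on every iteration (the FIXME notes this could be computed once per hour instead of once per chunk). The files it produces can be read back with the dump utilities shown further down.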

  public Chunk getNewChunk() {
    int ms = r.nextInt(1000);
    String line = "2008-05-29 10:42:22," + ms
        + " INFO org.apache.hadoop.dfs.DataNode: Some text goes here"
        + r.nextInt() + "\n";
    ChunkImpl c = new ChunkImpl("HadoopLogProcessor", "test",
        line.length() - 1, line.getBytes(), null);

    return c;
  }
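
The same getNewChunk helper as above, here shown with its full signature: it hands the chunk back through the Chunk interface rather than the concrete ChunkImpl type.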

    FileSystem fs = FileSystem.get(new URI(fsName), conf);

    SequenceFile.Reader r = new SequenceFile.Reader(fs, new Path(args[0]), conf);

    ChukwaArchiveKey key = new ChukwaArchiveKey();
    ChunkImpl chunk = ChunkImpl.getBlankChunk();
    try {
      while (r.next(key, chunk)) {
        if (args[1].equalsIgnoreCase(chunk.getDataType())) {
          if (args[2].equalsIgnoreCase("ALL")
              || args[2].equalsIgnoreCase(chunk.getSource())) {
            System.out.print(new String(chunk.getData()));
          }
        }

      }
    } catch (Exception e) {
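
A small dump tool that scans an archive file and prints the raw data of every chunk matching a data type (args[1]) and a source host (args[2], or ALL for any source); fsName, the file-system URI, comes from the enclosing scope. A hypothetical invocation (the jar, class, and file names are illustrative, not from the project):

    hadoop jar chukwa-core.jar DumpDataType chunks.seq HadoopLog ALL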

        // Guard against integer overflow: compute the size in long arithmetic
        // and clamp to the maximum array size before casting down.
        long targetBytes = MSToSleep * (long) bytesPerSec / 1000L;
        int arraySize = (int) Math.min(targetBytes, Integer.MAX_VALUE);
        byte[] data = new byte[arraySize];
        r.nextBytes(data);
        offset += data.length;
        ChunkImpl evt = new ChunkImpl(type, "random data source", offset, data,
            this);

        dest.add(evt);

        Thread.sleep(MSToSleep);
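
A load-generating adaptor: each cycle emits a burst of random bytes sized to sustain bytesPerSec, advances offset by the bytes sent, hands the chunk to the receiver, and sleeps. The original carried a FIXME about integer overflow in the size computation; the version above addresses it by clamping the computed size to Integer.MAX_VALUE.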

    FileSystem fs = FileSystem.get(new URI(fsName), conf);

    SequenceFile.Reader r = new SequenceFile.Reader(fs, new Path(args[0]), conf);

    ChukwaArchiveKey key = new ChukwaArchiveKey();
    ChunkImpl chunk = ChunkImpl.getBlankChunk();
    try {
      while (r.next(key, chunk)) {
        System.out.println("\nTimePartition: " + key.getTimePartition());
        System.out.println("DataType: " + key.getDataType());
        System.out.println("StreamName: " + key.getStreamName());
        System.out.println("SeqId: " + key.getSeqId());
        System.out.println("\t\t =============== ");

        System.out.println("Cluster : " + chunk.getTags());
        System.out.println("DataType : " + chunk.getDataType());
        System.out.println("Source : " + chunk.getSource());
        System.out.println("Application : " + chunk.getApplication());
        System.out.println("SeqID : " + chunk.getSeqID());
        System.out.println("Data : " + new String(chunk.getData()));
      }
    } catch (Exception e) {
      e.printStackTrace();
    }
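
The most complete reader: it walks an archive file and prints every field of both the key and the chunk, which makes it handy for inspecting what the writers above actually produced. Putting the two halves together, here is a minimal round trip using the same APIs. It is a sketch, not code from the project: the class name, file path, and payload are illustrative, and it assumes the Chukwa and Hadoop jars are on the classpath.

    import org.apache.hadoop.chukwa.ChukwaArchiveKey;
    import org.apache.hadoop.chukwa.ChunkImpl;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FSDataOutputStream;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.io.SequenceFile;

    public class ChunkRoundTrip {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        FileSystem fs = FileSystem.getLocal(conf);
        Path path = new Path("roundtrip.seq");

        // Write a single chunk, keyed the same way the examples above key theirs.
        byte[] data = "hello chukwa\n".getBytes();
        ChunkImpl chunk = new ChunkImpl("Demo", "demo-stream", data.length, data, null);
        FSDataOutputStream out = fs.create(path);
        SequenceFile.Writer writer = SequenceFile.createWriter(conf, out,
            ChukwaArchiveKey.class, ChunkImpl.class,
            SequenceFile.CompressionType.NONE, null);
        ChukwaArchiveKey key = new ChukwaArchiveKey();
        key.setTimePartition(System.currentTimeMillis());
        key.setDataType(chunk.getDataType());
        key.setStreamName(chunk.getStreamName());
        key.setSeqId(chunk.getSeqID());
        writer.append(key, chunk);
        writer.close();
        out.close();

        // Read it back, as the dump tools above do.
        SequenceFile.Reader reader = new SequenceFile.Reader(fs, path, conf);
        ChukwaArchiveKey k = new ChukwaArchiveKey();
        ChunkImpl c = ChunkImpl.getBlankChunk();
        while (reader.next(k, c)) {
          System.out.println(c.getDataType() + " / " + c.getStreamName()
              + " @ " + c.getSeqID() + ": " + new String(c.getData()));
        }
        reader.close();
      }
    }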
