Package org.apache.hadoop.chukwa

Examples of org.apache.hadoop.chukwa.ChukwaArchiveKey
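
ChukwaArchiveKey is the key class Chukwa pairs with ChunkImpl when it reads and writes Hadoop SequenceFiles of collected data. The examples below exercise its four fields: a time partition (a millisecond timestamp, usually truncated to the hour), a data type, a stream name (often assembled as tags/source/stream), and a sequence ID. A minimal sketch of populating a key; the literal values here are illustrative, not taken from the Chukwa sources:

    ChukwaArchiveKey archiveKey = new ChukwaArchiveKey();
    // bucket the key by hour, as the writer examples below do
    long now = System.currentTimeMillis();
    archiveKey.setTimePartition(now - (now % (60L * 60L * 1000L)));
    archiveKey.setDataType("SysLog");                       // illustrative data type
    archiveKey.setStreamName("cluster/localhost/streamA");  // tags/source/stream
    archiveKey.setSeqId(0L);

In the first example, a collector-side writer stamps each incoming Chunk with a key partitioned to the current hour: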


  public void add(Chunk chunk) throws WriterException {

    if (chunk != null) {
      try {
        ChukwaArchiveKey archiveKey = new ChukwaArchiveKey();

        // FIXME compute this once an hour
        synchronized (calendar) {
          calendar.setTimeInMillis(System.currentTimeMillis());
          calendar.set(Calendar.MINUTE, 0);
          calendar.set(Calendar.SECOND, 0);
          calendar.set(Calendar.MILLISECOND, 0);

          archiveKey.setTimePartition(calendar.getTimeInMillis());
        }

        archiveKey.setDataType(chunk.getDataType());
        archiveKey.setStreamName(chunk.getTags() + "/" + chunk.getSource()
            + "/" + chunk.getStreamName());
        archiveKey.setSeqId(chunk.getSeqID());

        ClientAck localClientAck = null;
        synchronized (lock) {
          localClientAck = SeqFileWriter.clientAck;
          log.info("[" + Thread.currentThread().getName()
          // ...
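
Reading keys and chunks back out of a finished ".done" sequence file and writing the raw chunk bytes to a file: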


    SequenceFile.Reader r = new SequenceFile.Reader(fs, new Path(directory
        + fileName + ".done"), conf);

    File outputFile = new File(directory + fileName + ".raw");

    ChukwaArchiveKey key = new ChukwaArchiveKey();
    ChunkImpl chunk = ChunkImpl.getBlankChunk();
    FileWriter out = new FileWriter(outputFile);
    try {
      while (r.next(key, chunk)) {
        out.write(new String(chunk.getData()));
        // ...
      }
    } finally {
      // flush the raw output and release the sequence-file reader
      out.close();
      r.close();
    }
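
A test helper that writes a batch of chunks to an uncompressed sequence file, building one key per chunk: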

    SequenceFile.Writer seqFileWriter = SequenceFile.createWriter(conf, out,
        ChukwaArchiveKey.class, ChunkImpl.class,
        SequenceFile.CompressionType.NONE, null);
   
    for (ChunkImpl chunk: chunks) {
      ChukwaArchiveKey archiveKey = new ChukwaArchiveKey();
     
      calendar.set(Calendar.SECOND, 0);
      calendar.set(Calendar.MILLISECOND, 0);
      archiveKey.setTimePartition(calendar.getTimeInMillis());
     
      archiveKey.setDataType(chunk.getDataType());
      archiveKey.setStreamName(chunk.getStreamName());
      archiveKey.setSeqId(chunk.getSeqID());
      seqFileWriter.append(archiveKey, chunk);
    }
    seqFileWriter.close();
    out.close();
  }
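
A mapper test that feeds a chunk through map() with a fresh (empty) ChukwaArchiveKey: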

    ChunkImpl chunk = (ChunkImpl)cb.getChunk();

    ChukwaTestOutputCollector<ChukwaRecordKey, ChukwaRecord> output =
            new ChukwaTestOutputCollector<ChukwaRecordKey, ChukwaRecord>();

    mapper.map(new ChukwaArchiveKey(), chunk, output, Reporter.NULL);
    ChukwaRecordKey recordKey = new ChukwaRecordKey("someReduceType", SAMPLE_RECORD_DATA);

    assertEquals("MockMapProcessor never invoked - no records found", 1, output.data.size());
    assertNotNull("MockMapProcessor never invoked", output.data.get(recordKey));
  }
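
The same test pattern, this time with a custom data type set on the chunk first: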

    chunk.setDataType(custom_DataType);

    ChukwaTestOutputCollector<ChukwaRecordKey, ChukwaRecord> output =
            new ChukwaTestOutputCollector<ChukwaRecordKey, ChukwaRecord>();

    mapper.map(new ChukwaArchiveKey(), chunk, output, Reporter.NULL);
    ChukwaRecordKey recordKey = new ChukwaRecordKey("someReduceType", SAMPLE_RECORD_DATA);

    assertEquals("MockMapProcessor never invoked - no records found", 1, output.data.size());
    assertNotNull("MockMapProcessor never invoked", output.data.get(recordKey));
  }
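
Walking an archive file and collecting each key's fields alongside its chunk's metadata: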

  protected String dumpArchive(FileSystem fs,Configuration conf, String file) throws Throwable {
    SequenceFile.Reader reader = null;
    try {
      reader = new SequenceFile.Reader(fs, new Path(file), conf);

      ChukwaArchiveKey key = new ChukwaArchiveKey();
      ChunkImpl chunk = ChunkImpl.getBlankChunk();

      StringBuilder sb = new StringBuilder();
      while (reader.next(key, chunk)) {
        sb.append("\nTimePartition: " + key.getTimePartition());
        sb.append("DataType: " + key.getDataType());
        sb.append("StreamName: " + key.getStreamName());
        sb.append("SeqId: " + key.getSeqId());
        sb.append("\t\t =============== ");

        sb.append("Cluster : " + chunk.getTags());
        sb.append("DataType : " + chunk.getDataType());
        sb.append("Source : " + chunk.getSource());
View Full Code Here
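
Routing a chunk into HBase: the MapProcessor registered for the chunk's data type is invoked with an empty archive key: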

            Table table = findHBaseTable(chunk.getDataType());

            if(table!=null) {
              HTableInterface hbase = pool.getTable(table.name().getBytes());
              MapProcessor processor = getProcessor(chunk.getDataType());
              processor.process(new ChukwaArchiveKey(), chunk, output, reporter);

              hbase.put(output.getKeyValues());
              pool.putTable(hbase);
            }
          } catch (Exception e) {
            // ...
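
A test helper that runs a processor over a chunk and gathers the records it emits: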

  /** @return ... */
  private String testProcessor(AbstractProcessor p, JSONObject inData,
      Chunk chunk) {
    ChukwaTestOutputCollector<ChukwaRecordKey, ChukwaRecord> output = new ChukwaTestOutputCollector<ChukwaRecordKey, ChukwaRecord>();
    p.process(new ChukwaArchiveKey(), chunk, output, null);
    HashMap<ChukwaRecordKey, ChukwaRecord> outData = output.data;

    // First get all ChukwaRecords and then get all field-data pairs within
    // each record
    Iterator<Entry<ChukwaRecordKey, ChukwaRecord>> recordIter = outData
View Full Code Here
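
Building chunks line by line, keying each with the current time and the chunk's own metadata before handing it to a processor: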

       ChunkImpl chunk = new ChunkImpl(dataType, streamName,
         line.length()  + lastSeqID, line.getBytes(), null);
       lastSeqID += line.length();
       chunk.addTag("cluster=\"" + clusterName + "\"");

       ChukwaArchiveKey archiveKey = new ChukwaArchiveKey();
       archiveKey.setTimePartition(System.currentTimeMillis());
       archiveKey.setDataType(chunk.getDataType());
       archiveKey.setStreamName(chunk.getStreamName());
       archiveKey.setSeqId(chunk.getSeqID());

       processor.process(archiveKey, chunk, collector, null);
       seqFileWriter.append(collector.getChukwaRecordKey(),
                            collector.getChukwaRecord());
     }
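
A rotating sequence-file writer that reuses a single archive key across a batch of chunks: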

      // Uncompressed for now
      SequenceFile.Writer seqFileWriter = SequenceFile.createWriter(conf, currentOutputStr,
          ChukwaArchiveKey.class, ChunkImpl.class,
          SequenceFile.CompressionType.NONE, null);
   
      ChukwaArchiveKey archiveKey = new ChukwaArchiveKey();
     
      if (System.currentTimeMillis() >= nextTimePeriodComputation) {
        computeTimePeriod();
      }

      for (Chunk chunk : chunks) {
        if (chunk == null) {
          continue; // skip null chunks before dereferencing them
        }
        archiveKey.setTimePartition(timePeriod);
        archiveKey.setDataType(chunk.getDataType());
        archiveKey.setStreamName(chunk.getTags() + "/" + chunk.getSource()
            + "/" + chunk.getStreamName());
        archiveKey.setSeqId(chunk.getSeqID());

        // compute size for stats
        dataSize += chunk.getData().length;
        bytesThisRotate += chunk.getData().length;
        // ...
