Package org.apache.hadoop.chukwa

Examples of org.apache.hadoop.chukwa.ChukwaArchiveKey
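ChukwaArchiveKey is the Writable key that Chukwa pairs with each ChunkImpl in its SequenceFiles; as the excerpts below show, it carries four fields: a time partition, a data type, a stream name, and a sequence ID. Before the excerpts, here is a minimal self-contained sketch of the shared write/read pattern. Only calls visible in the excerpts are used; the local filesystem and the /tmp output path are assumptions made so the example runs standalone.

    import java.util.Calendar;

    import org.apache.hadoop.chukwa.ChukwaArchiveKey;
    import org.apache.hadoop.chukwa.ChunkImpl;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.io.SequenceFile;

    public class ChukwaArchiveKeyExample {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        FileSystem fs = FileSystem.getLocal(conf);        // assumption: local FS
        Path path = new Path("/tmp/chukwa-example.seq");  // hypothetical path

        // Write one chunk under a key whose time partition is truncated
        // to the minute, as several excerpts below do.
        SequenceFile.Writer writer = SequenceFile.createWriter(fs, conf, path,
            ChukwaArchiveKey.class, ChunkImpl.class,
            SequenceFile.CompressionType.NONE);

        byte[] data = "hello chukwa".getBytes();
        ChunkImpl chunk = new ChunkImpl("ExampleType", "example/stream",
            data.length, data, null);

        Calendar calendar = Calendar.getInstance();
        calendar.set(Calendar.SECOND, 0);
        calendar.set(Calendar.MILLISECOND, 0);

        ChukwaArchiveKey key = new ChukwaArchiveKey();
        key.setTimePartition(calendar.getTimeInMillis());
        key.setDataType(chunk.getDataType());
        key.setStreamName(chunk.getStreamName());
        key.setSeqId(chunk.getSeqID());

        writer.append(key, chunk);
        writer.close();

        // Read the pairs back the way the dump examples below do.
        SequenceFile.Reader reader = new SequenceFile.Reader(fs, path, conf);
        ChukwaArchiveKey readKey = new ChukwaArchiveKey();
        ChunkImpl readChunk = ChunkImpl.getBlankChunk();
        while (reader.next(readKey, readChunk)) {
          System.out.println(readKey.getStreamName() + ": "
              + new String(readChunk.getData()));
        }
        reader.close();
      }
    }

Truncating the partition to a time window presumably keeps chunks from the same stream and interval adjacent in the archive; the collector excerpts below do the same with a precomputed timePeriod.

Reading a data sink file: a SequenceFile.Reader walks the (ChukwaArchiveKey, ChunkImpl) pairs and dumps them to a .dump text file: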


    long lastSeqId = -1;
    BufferedWriter out = null;
    try {
     
      reader = new SequenceFile.Reader(fs, new Path(dataSinkFile), conf);
      ChukwaArchiveKey key = new ChukwaArchiveKey();
      ChunkImpl chunk = ChunkImpl.getBlankChunk();

      String dataSinkDumpName = dataSinkFile + ".dump";
      out = new BufferedWriter(new FileWriter(dataSinkDumpName));
     
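Converting a completed sink file to raw text: the same read loop walks the .done file and writes each chunk's bytes to a .raw output file: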


    SequenceFile.Reader r = new SequenceFile.Reader(fs, new Path(directory
        + fileName + ".done"), conf);

    File outputFile = new File(directory + fileName + ".raw");

    ChukwaArchiveKey key = new ChukwaArchiveKey();
    ChunkImpl chunk = ChunkImpl.getBlankChunk();
    FileWriter out = new FileWriter(outputFile);
    try {
      while (r.next(key, chunk)) {
        out.write(new String(chunk.getData()));
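Writing chunks: each ChunkImpl is appended under a key whose time partition is truncated to the minute: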

    SequenceFile.Writer seqFileWriter = SequenceFile.createWriter(conf, out,
        ChukwaArchiveKey.class, ChunkImpl.class,
        SequenceFile.CompressionType.NONE, null);
   
    for (ChunkImpl chunk: chunks) {
      ChukwaArchiveKey archiveKey = new ChukwaArchiveKey();
     
      calendar.set(Calendar.SECOND, 0);
      calendar.set(Calendar.MILLISECOND, 0);
      archiveKey.setTimePartition(calendar.getTimeInMillis());
     
      archiveKey.setDataType(chunk.getDataType());
      archiveKey.setStreamName(chunk.getStreamName());
      archiveKey.setSeqId(chunk.getSeqID());
      seqFileWriter.append(archiveKey, chunk);
    }
    seqFileWriter.close();
    out.close();
  }
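Inside the collector's writer: the key's stream name is qualified with the chunk's tags and source, and appends happen under a lock after the current time period is (re)computed: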

      log.info("Collector not ready");
      throw new WriterException("Collector not ready");
    }

    if (chunks != null) {
      ChukwaArchiveKey archiveKey = new ChukwaArchiveKey();
     
      if (System.currentTimeMillis() >= nextTimePeriodComputation) {
        computeTimePeriod();
      }
      try {
        lock.acquire();
        for (Chunk chunk : chunks) {
          if (chunk == null) {
            continue; // guard before use; the original checked for null only after dereferencing chunk
          }
          archiveKey.setTimePartition(timePeriod);
          archiveKey.setDataType(chunk.getDataType());
          archiveKey.setStreamName(chunk.getTags() + "/" + chunk.getSource()
              + "/" + chunk.getStreamName());
          archiveKey.setSeqId(chunk.getSeqID());

          seqFileWriter.append(archiveKey, chunk);

          // compute size for stats only if append succeeded. Note though that
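Feeding a processor: chunks are built line by line with a running sequence ID, keyed with the current time, and the processor's output records are appended to the sequence file: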

       ChunkImpl chunk = new ChunkImpl(dataType, streamName,
         line.length() + lastSeqID, line.getBytes(), null);
       lastSeqID += line.length();
       chunk.addTag("cluster=\"" + clusterName + "\"");

       ChukwaArchiveKey archiveKey = new ChukwaArchiveKey();
       archiveKey.setTimePartition(System.currentTimeMillis());
       archiveKey.setDataType(chunk.getDataType());
       archiveKey.setStreamName(chunk.getStreamName());
       archiveKey.setSeqId(chunk.getSeqID());

       processor.process(archiveKey, chunk, collector, Reporter.NULL);
       seqFileWriter.append(collector.getChukwaRecordKey(),
                            collector.getChukwaRecord());
     }
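A synchronized variant of the collector write path, which also flags that chunks were written this rotation and accumulates data size for stats: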

    }
    long now = System.currentTimeMillis();
    if (chunks != null) {
      try {
        chunksWrittenThisRotate = true;
        ChukwaArchiveKey archiveKey = new ChukwaArchiveKey();

        synchronized (lock) {
          if (System.currentTimeMillis() >= nextTimePeriodComputation) {
            computeTimePeriod();
          }

          for (Chunk chunk : chunks) {
            if (chunk == null) {
              continue; // guard before use; the original checked for null only after dereferencing chunk
            }
            archiveKey.setTimePartition(timePeriod);
            archiveKey.setDataType(chunk.getDataType());
            archiveKey.setStreamName(chunk.getTags() + "/" + chunk.getSource()
                + "/" + chunk.getStreamName());
            archiveKey.setSeqId(chunk.getSeqID());

            seqFileWriter.append(archiveKey, chunk);
            // compute size for stats
            dataSize += chunk.getData().length;
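Writing through a processor into HBase: each chunk is processed under a fresh ChukwaArchiveKey and the resulting key-values are put into the resolved table: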

                table = t;
              }
            }
            if (table != null) {
              HTableInterface hbase = pool.getTable(table.name().getBytes());
              processor.process(new ChukwaArchiveKey(), chunk, output, reporter);
              hbase.put(output.getKeyValues());
              pool.putTable(hbase);
            }
          } catch (Exception e) {
            log.warn(output.getKeyValues());

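Dumping an archive file: dumpFile tallies chunks per source:dataType:streamName and, unless summarizing, prints each key's fields: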

  private static void dumpFile(Path p, Configuration conf,
      FileSystem fs) throws IOException {
    SequenceFile.Reader r = new SequenceFile.Reader(fs, p, conf);

    ChukwaArchiveKey key = new ChukwaArchiveKey();
    ChunkImpl chunk = ChunkImpl.getBlankChunk();
    try {
      while (r.next(key, chunk)) {
       
        String entryKey = chunk.getSource() + ":" + chunk.getDataType() + ":"
            + chunk.getStreamName();

        Integer oldC = counts.get(entryKey);
        if (oldC != null)
          counts.put(entryKey, oldC + 1);
        else
          counts.put(entryKey, 1); // autoboxing; avoids the deprecated new Integer(1)

        if (!summarize) {
          System.out.println("\nTimePartition: " + key.getTimePartition());
          System.out.println("DataType: " + key.getDataType());
          System.out.println("StreamName: " + key.getStreamName());
          System.out.println("SeqId: " + key.getSeqId());
          System.out.println("\t\t =============== ");
 
          System.out.println("Cluster : " + chunk.getTags());
          System.out.println("DataType : " + chunk.getDataType());
          System.out.println("Source : " + chunk.getSource());
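Generating test data: random chunks are written under a fixed time partition (May 29, 2008):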

         ChukwaArchiveKey.class, ChunkImpl.class,
         SequenceFile.CompressionType.NONE, null);
     RandSeqFileWriter rw = new RandSeqFileWriter();
     for (int i = 0; i < chunks; ++i) {
       ChunkImpl chunk = rw.getARandomChunk();
       ChukwaArchiveKey archiveKey = new ChukwaArchiveKey();

       calendar.set(Calendar.YEAR, 2008);
       calendar.set(Calendar.MONTH, Calendar.MAY);
       calendar.set(Calendar.DAY_OF_MONTH, 29);
       calendar.set(Calendar.HOUR, 10);
       calendar.set(Calendar.MINUTE, 0);
       calendar.set(Calendar.SECOND, 0);
       calendar.set(Calendar.MILLISECOND, 0);
       archiveKey.setTimePartition(calendar.getTimeInMillis());
       archiveKey.setDataType(chunk.getDataType());
       archiveKey.setStreamName(chunk.getStreamName());
       archiveKey.setSeqId(chunk.getSeqID());
       seqFileWriter.append(archiveKey, chunk);
     }
     seqFileWriter.close();
     out.close();
   }
