Examples of ChukwaArchiveKey


Examples of org.apache.hadoop.chukwa.ChukwaArchiveKey

    Path recoverDonePath = new Path(recoverDoneDir);
    String doneFileName = originalFileName.substring(0, extensionIndex) + ".done";
    String doneDir = originalFileDir + doneFileName;
    Path donePath = new Path(doneDir);

    ChukwaArchiveKey key = new ChukwaArchiveKey();
    ChunkImpl evt = ChunkImpl.getBlankChunk();

    // write the recovered chunks to a new sequence file keyed by ChukwaArchiveKey
    newOutputStr = localFs.create(recoverPath);
    seqFileWriter = SequenceFile.createWriter(conf, newOutputStr,
                                              ChukwaArchiveKey.class, ChunkImpl.class,
                                              SequenceFile.CompressionType.NONE, null);

Examples of org.apache.hadoop.chukwa.ChukwaArchiveKey

    SequenceFile.Writer seqFileWriter = SequenceFile.createWriter(conf, out,
        ChukwaArchiveKey.class, ChunkImpl.class,
        SequenceFile.CompressionType.NONE, null);
    for (int i = 0; i < chunks; ++i) {
      ChunkImpl chunk = getARandomChunk();
      ChukwaArchiveKey archiveKey = new ChukwaArchiveKey();
      // FIXME compute this once an hour
      calendar.setTimeInMillis(System.currentTimeMillis());
      calendar.set(Calendar.MINUTE, 0);
      calendar.set(Calendar.SECOND, 0);
      calendar.set(Calendar.MILLISECOND, 0);
      archiveKey.setTimePartition(calendar.getTimeInMillis());
      archiveKey.setDataType(chunk.getDataType());
      archiveKey.setStreamName(chunk.getStreamName());
      archiveKey.setSeqId(chunk.getSeqID());
      seqFileWriter.append(archiveKey, chunk);
    }
    seqFileWriter.close();
    out.close();
  }
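
The "FIXME compute this once an hour" note above concerns the time-partition value: the key's time partition is the wall-clock timestamp rounded down to the start of the current hour. A minimal sketch of that rounding as a reusable helper, using the same java.util.Calendar calls as the excerpt (the class and method names here are illustrative, not part of Chukwa):

    import java.util.Calendar;

    public class TimePartitionSketch {
      /** Round a timestamp (ms since the epoch) down to the start of its hour. */
      public static long hourlyTimePartition(long timestampMs) {
        Calendar calendar = Calendar.getInstance();
        calendar.setTimeInMillis(timestampMs);
        calendar.set(Calendar.MINUTE, 0);
        calendar.set(Calendar.SECOND, 0);
        calendar.set(Calendar.MILLISECOND, 0);
        return calendar.getTimeInMillis();
      }
    }

With a helper like this, the loop above reduces to archiveKey.setTimePartition(hourlyTimePartition(System.currentTimeMillis())), and the rounded value only needs to be recomputed once per hour, which is what the FIXME asks for.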

Examples of org.apache.hadoop.chukwa.ChukwaArchiveKey

        if (!fstatus.getPath().getName().endsWith(".done"))
          continue;

        SequenceFile.Reader reader = new SequenceFile.Reader(localfs, fstatus.getPath(), conf);

        ChukwaArchiveKey key = new ChukwaArchiveKey();
        ChunkImpl chunk = ChunkImpl.getBlankChunk();

        while (reader.next(key, chunk)) {
          bytes.add(new ByteRange(chunk));
        }

Examples of org.apache.hadoop.chukwa.ChukwaArchiveKey

  protected String dumpArchive(FileSystem fs, Configuration conf, String file) throws Throwable {
    SequenceFile.Reader reader = null;
    try {
      reader = new SequenceFile.Reader(fs, new Path(file), conf);

      ChukwaArchiveKey key = new ChukwaArchiveKey();
      ChunkImpl chunk = ChunkImpl.getBlankChunk();

      StringBuilder sb = new StringBuilder();
      while (reader.next(key, chunk)) {
        sb.append("\nTimePartition: " + key.getTimePartition());
        sb.append("DataType: " + key.getDataType());
        sb.append("StreamName: " + key.getStreamName());
        sb.append("SeqId: " + key.getSeqId());
        sb.append("\t\t =============== ");

        sb.append("Cluster : " + chunk.getTags());
        sb.append("DataType : " + chunk.getDataType());
        sb.append("Source : " + chunk.getSource());
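
A self-contained variant of the same dump pattern, with the imports it needs and the reader closed in a finally block. This is a minimal sketch: the class name, the plain FileSystem.get(conf) lookup, and the separators in the output are illustrative choices, not Chukwa's own dump tool.

    import org.apache.hadoop.chukwa.ChukwaArchiveKey;
    import org.apache.hadoop.chukwa.ChunkImpl;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.io.SequenceFile;

    public class ArchiveDumpSketch {
      /** Dump every ChukwaArchiveKey/ChunkImpl pair in the given sequence file. */
      public static String dump(Configuration conf, String file) throws Exception {
        FileSystem fs = FileSystem.get(conf);
        SequenceFile.Reader reader = null;
        StringBuilder sb = new StringBuilder();
        try {
          reader = new SequenceFile.Reader(fs, new Path(file), conf);
          ChukwaArchiveKey key = new ChukwaArchiveKey();
          ChunkImpl chunk = ChunkImpl.getBlankChunk();
          while (reader.next(key, chunk)) {
            sb.append("\nTimePartition: ").append(key.getTimePartition());
            sb.append(" DataType: ").append(key.getDataType());
            sb.append(" StreamName: ").append(key.getStreamName());
            sb.append(" SeqId: ").append(key.getSeqId());
            sb.append(" Cluster: ").append(chunk.getTags());
            sb.append(" Source: ").append(chunk.getSource());
          }
        } finally {
          if (reader != null) {
            reader.close();
          }
        }
        return sb.toString();
      }
    }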

Examples of org.apache.hadoop.chukwa.ChukwaArchiveKey

      fs = FileSystem.get(new URI(fsURL), conf);
      SequenceFile.Reader r = new SequenceFile.Reader(fs, new Path(fname), conf);
      System.out.println("key class name is " + r.getKeyClassName());
      System.out.println("value class name is " + r.getValueClassName());

      ChukwaArchiveKey key = new ChukwaArchiveKey();
      ChunkImpl evt = ChunkImpl.getBlankChunk();
      int events = 0;
      while (r.next(key, evt) && (events < 5)) {
        if (!Writable.class.isAssignableFrom(key.getClass()))
          System.out.println("warning: keys aren't writable");

        if (!Writable.class.isAssignableFrom(evt.getClass()))
          System.out.println("warning: values aren't writable");

Examples of org.apache.hadoop.chukwa.ChukwaArchiveKey

    ChunkImpl chunk = (ChunkImpl)cb.getChunk();

    ChukwaTestOutputCollector<ChukwaRecordKey, ChukwaRecord> output =
            new ChukwaTestOutputCollector<ChukwaRecordKey, ChukwaRecord>();

    mapper.map(new ChukwaArchiveKey(), chunk, output, Reporter.NULL);
    ChukwaRecordKey recordKey = new ChukwaRecordKey("someReduceType", SAMPLE_RECORD_DATA);

    assertEquals("MockMapProcessor never invoked - no records found", 1, output.data.size());
    assertNotNull("MockMapProcessor never invoked", output.data.get(recordKey));
  }

Examples of org.apache.hadoop.chukwa.ChukwaArchiveKey

      // Uncompressed for now
      SequenceFile.Writer seqFileWriter = SequenceFile.createWriter(conf, currentOutputStr,
          ChukwaArchiveKey.class, ChunkImpl.class,
          SequenceFile.CompressionType.NONE, null);
   
      ChukwaArchiveKey archiveKey = new ChukwaArchiveKey();
     
      if (System.currentTimeMillis() >= nextTimePeriodComputation) {
        computeTimePeriod();
      }

      for (Chunk chunk : chunks) {
        // skip null chunks before reading any of their fields
        if (chunk == null)
          continue;

        archiveKey.setTimePartition(timePeriod);
        archiveKey.setDataType(chunk.getDataType());
        archiveKey.setStreamName(chunk.getTags() + "/" + chunk.getSource()
            + "/" + chunk.getStreamName());
        archiveKey.setSeqId(chunk.getSeqID());

        // compute size for stats
        dataSize += chunk.getData().length;
        bytesThisRotate += chunk.getData().length;
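
Because every record in the data sink file is keyed by a ChukwaArchiveKey, the key alone is enough to select records by data type when the file is read back. A minimal sketch under the same assumptions as above (the class name, method name, and arguments are illustrative):

    import org.apache.hadoop.chukwa.ChukwaArchiveKey;
    import org.apache.hadoop.chukwa.ChunkImpl;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.io.SequenceFile;

    public class DataTypeFilterSketch {
      /** Count the records in a sink file whose key carries the given data type. */
      public static int countRecordsOfType(Configuration conf, Path sinkFile,
                                           String dataType) throws Exception {
        FileSystem fs = FileSystem.get(conf);
        SequenceFile.Reader reader = new SequenceFile.Reader(fs, sinkFile, conf);
        ChukwaArchiveKey key = new ChukwaArchiveKey();
        ChunkImpl chunk = ChunkImpl.getBlankChunk();
        int matches = 0;
        try {
          while (reader.next(key, chunk)) {
            if (dataType.equals(key.getDataType())) {
              matches++;
            }
          }
        } finally {
          reader.close();
        }
        return matches;
      }
    }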

Examples of org.apache.hadoop.chukwa.ChukwaArchiveKey

    long lastSeqId = -1;
    BufferedWriter out = null;
    try {
     
      reader = new SequenceFile.Reader(fs, new Path(dataSinkFile), conf);
      ChukwaArchiveKey key = new ChukwaArchiveKey();
      ChunkImpl chunk = ChunkImpl.getBlankChunk();

      String dataSinkDumpName = dataSinkFile + ".dump";
      out = new BufferedWriter(new FileWriter(dataSinkDumpName));
     
