Package org.apache.hadoop.chukwa

Examples of org.apache.hadoop.chukwa.ChunkImpl
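ChunkImpl is the concrete implementation of the Chunk interface used throughout these snippets: a block of bytes read by an adaptor, tagged with a data type, a source/stream name, and a sequence ID. A minimal sketch of constructing a chunk and reading its fields back, following the constructor argument order used in the snippets below (the wrapper class name is just for illustration):

    import org.apache.hadoop.chukwa.Chunk;
    import org.apache.hadoop.chukwa.ChunkImpl;

    public class ChunkImplExample {
      public static void main(String[] args) {
        byte[] data = "one line of log output\n".getBytes();

        // Arguments: data type, source/stream name, sequence ID, payload, originating adaptor.
        // A null adaptor is fine for standalone use, as several snippets below show.
        Chunk chunk = new ChunkImpl("RawLog", "localhost", data.length, data, null);

        System.out.println(chunk.getDataType());   // RawLog
        System.out.println(chunk.getSource());     // localhost
        System.out.println(chunk.getSeqID());      // the sequence ID passed to the constructor
        System.out.println(new String(chunk.getData()));
      }
    }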


    // Test fragment: read ChukwaArchiveKey/ChunkImpl pairs back out of a data-sink
    // SequenceFile, check each chunk's cluster, data type and source, and dump the raw
    // payloads to a ".dump" file. reader, cluster, dataType, source and lastSeqId are
    // declared in the elided part of the enclosing method.
    BufferedWriter out = null;
    try {
      reader = new SequenceFile.Reader(fs, new Path(dataSinkFile), conf);
      ChukwaArchiveKey key = new ChukwaArchiveKey();
      ChunkImpl chunk = ChunkImpl.getBlankChunk();

      String dataSinkDumpName = dataSinkFile + ".dump";
      out = new BufferedWriter(new FileWriter(dataSinkDumpName));

      while (reader.next(key, chunk)) {
        Assert.assertTrue(cluster.equals(RecordUtil.getClusterName(chunk)));
        Assert.assertTrue(dataType.equals(chunk.getDataType()));
        Assert.assertTrue(source.equals(chunk.getSource()));

        out.write(new String(chunk.getData()));
        lastSeqId = chunk.getSeqID();
      }

      out.close();
      out = null;
      reader.close();
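The fragments above and below come from larger methods and omit the reader setup. A self-contained version of the same read loop, assuming the sink file path is passed on the command line (the class name DumpSinkFile is a placeholder for illustration), might look like this:

    import java.io.BufferedWriter;
    import java.io.FileWriter;

    import org.apache.hadoop.chukwa.ChukwaArchiveKey;
    import org.apache.hadoop.chukwa.ChunkImpl;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.io.SequenceFile;

    public class DumpSinkFile {
      public static void main(String[] args) throws Exception {
        String sinkFile = args[0];                    // path to a chukwa sink SequenceFile
        Configuration conf = new Configuration();
        FileSystem fs = FileSystem.getLocal(conf);

        SequenceFile.Reader reader = new SequenceFile.Reader(fs, new Path(sinkFile), conf);
        BufferedWriter out = new BufferedWriter(new FileWriter(sinkFile + ".dump"));
        try {
          ChukwaArchiveKey key = new ChukwaArchiveKey();
          ChunkImpl chunk = ChunkImpl.getBlankChunk();
          while (reader.next(key, chunk)) {
            // Each value in the file is a serialized ChunkImpl; append its payload to the dump.
            out.write(new String(chunk.getData()));
          }
        } finally {
          out.close();
          reader.close();
        }
      }
    }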


        + fileName + ".done"), conf);
    // Fragment: the partially elided statement above opens the reader r on the
    // fileName + ".done" sequence file; the loop below copies every chunk's payload
    // into a parallel ".raw" file.

    File outputFile = new File(directory + fileName + ".raw");

    ChukwaArchiveKey key = new ChukwaArchiveKey();
    ChunkImpl chunk = ChunkImpl.getBlankChunk();
    FileWriter out = new FileWriter(outputFile);
    try {
      while (r.next(key, chunk)) {
        out.write(new String(chunk.getData()));
      }
    } finally {
      out.close();
      r.close();
    }

      // Fragment: deserialize numEvents chunks from the DataInput di using the static
      // ChunkImpl.read() factory and collect them; FANCY_DIAGNOSTICS and diagnosticPage
      // are optional debugging hooks in the enclosing class.
      List<Chunk> events = new LinkedList<Chunk>();
      StringBuilder sb = new StringBuilder();

      for (int i = 0; i < numEvents; i++) {
        ChunkImpl logEvent = ChunkImpl.read(di);
        events.add(logEvent);

        if (FANCY_DIAGNOSTICS) {
          diagnosticPage.sawChunk(logEvent, i);
        }

      }
      // Fragment (system metrics collection): serialize the gathered metrics as JSON,
      // wrap the bytes in a chunk tagged "SystemMetrics"/"Sigar", and hand it to the
      // chunk receiver; sendOffset is the running sequence ID.
      json.put("disk", fsList);
      json.put("timestamp", System.currentTimeMillis());
      byte[] data = json.toString().getBytes();
      sendOffset += data.length;
      ChunkImpl c = new ChunkImpl("SystemMetrics", "Sigar", sendOffset, data, systemMetrics);
      if (!skip) {
        receiver.add(c);
      }
    } catch (Exception se) {
      log.error(ExceptionUtil.getStackTrace(se));

  // Adaptor helper: emit the whole buffer as a single chunk whose sequence ID is the
  // end offset of the buffer within the watched file (toWatch).
  protected int extractRecords(ChunkReceiver eq, long buffOffsetInFile,
      byte[] buf) throws InterruptedException {
    if (buf.length == 0)
      return 0;

    ChunkImpl chunk = new ChunkImpl(type, toWatch.getAbsolutePath(),
        buffOffsetInFile + buf.length, buf, this);

    eq.add(chunk);
    return buf.length;
  }

        offsets_i[i] = offsets.get(i);   // body of an elided loop copying record offsets

      // Emit the buffer as one chunk carrying per-record offsets; the sequence ID is the
      // file offset just past the last complete record.
      int bytesUsed = offsets_i[offsets_i.length - 1] + 1; // the char at the last offset uses a byte
      assert bytesUsed > 0 : " shouldn't send empty events";
      ChunkImpl event = new ChunkImpl(type, toWatch.getAbsolutePath(),
          buffOffsetInFile + bytesUsed, buf, this);

      event.setRecordOffsets(offsets_i);
      eq.add(event);

      offsets.clear();
      return bytesUsed;
    } else

        offsets_i[i] = offsets.get(i);   // body of an elided loop copying record offsets
      // make the stream unique to this adaptor
      // Like the previous variant, but the chunk's sequence ID is also set explicitly.
      int bytesUsed = offsets_i[offsets_i.length - 1] + 1; // the char at the last offset uses a byte
      assert bytesUsed > 0 : " shouldn't send empty events";
      ChunkImpl chunk = new ChunkImpl(type, toWatch.getAbsolutePath(),
          buffOffsetInFile + bytesUsed, buf, this);

      chunk.setSeqID(buffOffsetInFile + bytesUsed);
      chunk.setRecordOffsets(offsets_i);
      eq.add(chunk);

      offsets.clear();
      return bytesUsed;
    } else

    // Test fragment: wrap a single HDFS client-trace log line in a ChunkImpl and push it
    // through a pipeline writer (psw); a WriterException fails the test.
    try {
      psw.init(conf);
      // Send a client trace chunk
      ArrayList<Chunk> l = new ArrayList<Chunk>();
      String line = "2009-12-29 22:32:27,047 INFO org.apache.hadoop.hdfs.server.datanode.DataNode.clienttrace: src: /10.10.100.60:43707, dest: /10.10.100.60:50010, bytes: 7003141, op: HDFS_WRITE, cliID: DFSClient_-8389654, offset: 0, srvID: DS-2032680158-98.137.100.60-50010-1259976007324, blockid: blk_-2723720761101769540_705411, duration: 289013780000";
      l.add(new ChunkImpl("ClientTrace", "name", 1, line.getBytes(), null));
      assertTrue(l.size() == 1);
      psw.add(l);
      assertTrue(true);
    } catch (WriterException e) {
      fail(ExceptionUtil.getStackTrace(e));

    // Test fragment: push a one-byte chunk through the pipeline writer (psw) and verify
    // that a SocketDataLoader subscribed to all data types receives it.
    SocketDataLoader sdl = new SocketDataLoader("all");

    System.out.println("pipeline established; now pushing a chunk");
    ArrayList<Chunk> l = new ArrayList<Chunk>();
    l.add(new ChunkImpl("dt", "name", 1, new byte[] {'a'}, null));
    psw.add(l);
    // Push a chunk through; the SocketDataLoader should receive it.

    try {
      Collection<Chunk> clist = sdl.read();

    // Test fragment: build two small chunks of data type "Data" on the same stream
    // ("aname"), write them to a local SequenceFile via a writeSeqFile helper, and then
    // prepare arguments for dumping them back by data type.
    Configuration conf = new Configuration();
    Path path = new Path(tmpFile.getAbsolutePath());
    List<ChunkImpl> chunks = new ArrayList<ChunkImpl>();
    byte[] dat = "test".getBytes();

    ChunkImpl c = new ChunkImpl("Data", "aname", dat.length, dat, null);
    chunks.add(c);

    dat = "ing".getBytes();
    c = new ChunkImpl("Data", "aname", dat.length + 4, dat, null);
    chunks.add(c);

    writeSeqFile(conf, FileSystem.getLocal(conf), path, chunks);

    String[] args = new String[] {"datatype=Data", path.toString()};
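The writeSeqFile helper used above is not shown on this page. A minimal sketch of what such a helper could look like, assuming ChukwaArchiveKey exposes setters for its time partition, data type, stream name and sequence-ID fields (those setter names are an assumption, not confirmed by the snippets here):

    import java.io.IOException;
    import java.util.List;

    import org.apache.hadoop.chukwa.ChukwaArchiveKey;
    import org.apache.hadoop.chukwa.ChunkImpl;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FSDataOutputStream;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.io.SequenceFile;

    public class SeqFileUtil {
      // Hypothetical helper: write each chunk as a ChukwaArchiveKey/ChunkImpl pair.
      public static void writeSeqFile(Configuration conf, FileSystem fs, Path dest,
          List<ChunkImpl> chunks) throws IOException {
        FSDataOutputStream out = fs.create(dest);
        SequenceFile.Writer writer = SequenceFile.createWriter(conf, out,
            ChukwaArchiveKey.class, ChunkImpl.class,
            SequenceFile.CompressionType.NONE, null);
        try {
          for (ChunkImpl chunk : chunks) {
            ChukwaArchiveKey key = new ChukwaArchiveKey();
            key.setTimePartition(System.currentTimeMillis()); // assumed setter
            key.setDataType(chunk.getDataType());             // assumed setter
            key.setStreamName(chunk.getStreamName());         // assumed setter
            key.setSeqId(chunk.getSeqID());                   // assumed setter
            writer.append(key, chunk);
          }
        } finally {
          writer.close();
          out.close();
        }
      }
    }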
