Package org.apache.hadoop.chukwa

Examples of org.apache.hadoop.chukwa.ChunkImpl
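The excerpts below, collected from the Chukwa source tree, show how ChunkImpl instances are created by adaptors, serialized into sequence files, and read back by collectors and diagnostic tools.

The first excerpt is the simplest form of an adaptor's extractRecords(): the whole buffer becomes a single chunk, stamped with the watched file's path and the offset just past its last byte.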


   * @throws InterruptedException
   */
  protected int extractRecords(ChunkReceiver eq, long buffOffsetInFile, byte[] buf)
      throws InterruptedException
  {
    ChunkImpl chunk = new ChunkImpl(type, toWatch.getAbsolutePath(), buffOffsetInFile + buf.length,
        buf, this);

    eq.add(chunk);
    return buf.length;
  }
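
The ChunkReceiver argument is just a sink for finished chunks. A minimal in-memory implementation, useful for unit-testing extractRecords(), might look like the following sketch; it assumes ChunkReceiver lives in org.apache.hadoop.chukwa.datacollection and declares only the add(Chunk) method used above.

  import java.util.ArrayList;
  import java.util.List;
  import org.apache.hadoop.chukwa.Chunk;
  import org.apache.hadoop.chukwa.datacollection.ChunkReceiver;

  // Collects every chunk it is offered so a test can inspect them afterwards.
  class CollectingReceiver implements ChunkReceiver {
    final List<Chunk> received = new ArrayList<Chunk>();

    public void add(Chunk event) throws InterruptedException {
      received.add(event);
    }
  }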


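The next two excerpts accumulate per-record offsets before emitting. Here the collected offsets are copied into an int[] and attached to the chunk, so consumers can split it back into individual records.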
        for(int i = 0; i < offsets_i.length ; ++i)
          offsets_i[i] = offsets.get(i);
     
        int bytesUsed = offsets_i[offsets_i.length - 1] + 1; // char at last offset uses a byte
        assert bytesUsed > 0 : "shouldn't send empty events";
        ChunkImpl event = new ChunkImpl(type, toWatch.getAbsolutePath(), buffOffsetInFile + bytesUsed, buf, this);

        event.setRecordOffsets(offsets_i);
        eq.add(event);
       
        offsets.clear();
        return bytesUsed;
      }

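A near-identical variant that also sets the chunk's sequence ID to the offset of the last byte sent, apparently so the stream position survives across chunks.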
      for(int i = 0; i < offsets_i.length ; ++i)
        offsets_i[i] = offsets.get(i);
      //make the stream unique to this adaptor
      int bytesUsed = offsets_i[offsets_i.length - 1] + 1; // char at last offset uses a byte
      assert bytesUsed > 0 : "shouldn't send empty events";
      ChunkImpl chunk = new ChunkImpl(type, toWatch.getAbsolutePath(),
          buffOffsetInFile + bytesUsed, buf, this);
     
      chunk.setSeqID(buffOffsetInFile + bytesUsed);
      chunk.setRecordOffsets(offsets_i);
      eq.add(chunk);
     
      offsets.clear();
      return bytesUsed;
    }
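
On the consuming side those offsets mark the last byte of each record, as the "char at last offset uses a byte" comment implies. A sketch of splitting a chunk back apart, assuming Chunk exposes getData() and a matching getRecordOffsets():

      int start = 0;
      for (int end : chunk.getRecordOffsets()) {
        // each offset is the index of a record's final byte
        String record = new String(chunk.getData(), start, end - start + 1);
        System.out.println("record: " + record);
        start = end + 1; // the next record begins right after this one
      }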

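A validation tool that opens a Chukwa sequence file, checks that both keys and values are Writable, and prints the data of the first five chunks, truncating anything longer than 1000 bytes.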
    SequenceFile.Reader r = new SequenceFile.Reader(fs, new Path(fname), conf);
    System.out.println("key class name is " + r.getKeyClassName());
    System.out.println("value class name is " + r.getValueClassName());
   
    ChukwaArchiveKey key = new ChukwaArchiveKey();
    ChunkImpl evt =  ChunkImpl.getBlankChunk();
    int events = 0;
    while(r.next(key, evt) &&  (events < 5))
    {
      if(!Writable.class.isAssignableFrom(key.getClass()))
        System.out.println("warning: keys aren't writable");
     
      if(!Writable.class.isAssignableFrom(evt.getClass()))
        System.out.println("warning: values aren't writable");
     
      if(evt.getData().length > 1000)
      {
        System.out.println("got event; data: " + new String(evt.getData(), 0, 1000));
        System.out.println("....[truncating]");
      }
      else
        System.out.println("got event; data: " + new String(evt.getData()));
      events ++;
    }
    System.out.println("file looks OK!");
    }
    catch(Exception e)
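
The writing side of such a file is not shown on this page, but with the standard SequenceFile API it would look roughly like the sketch below. The ChukwaArchiveKey setters and the Chunk getters are assumptions, not taken from the excerpts above.

    // Sketch: append one chunk to an archive file keyed by ChukwaArchiveKey.
    SequenceFile.Writer w = SequenceFile.createWriter(fs, conf, new Path(fname),
        ChukwaArchiveKey.class, ChunkImpl.class);
    ChukwaArchiveKey k = new ChukwaArchiveKey();
    k.setDataType(chunk.getDataType());     // assumed field setters
    k.setStreamName(chunk.getStreamName());
    k.setSeqId(chunk.getSeqID());
    w.append(k, chunk);
    w.close();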

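A synthetic data source that emits random bytes at a configured rate: each pass sizes a buffer to bytesPerSec spread over the sleep interval, wraps it in a chunk, and sleeps.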
        // FIXME: I think there's still a risk of integer overflow here
        int arraySize = (int) (MSToSleep * (long) bytesPerSec / 1000L);
        byte[] data = new byte[arraySize];
        r.nextBytes(data);
        offset += data.length;
        ChunkImpl evt = new ChunkImpl(type, "random data source", offset, data, this);

        dest.add(evt);
       
        Thread.sleep(MSToSleep);
      } //end while
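
One way to address that FIXME is to keep the arithmetic in long and fail fast if the product cannot fit in an int; a sketch, not from the source:

        long size = MSToSleep * (long) bytesPerSec / 1000L;
        if (size > Integer.MAX_VALUE)
          throw new IllegalArgumentException("rate * interval overflows an int: " + size);
        byte[] data = new byte[(int) size];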

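The same generator without rate limiting: BUFFER_SIZE chunks of random bytes are emitted as fast as the receiver accepts them, until the adaptor is stopped or interrupted.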
    try{
      while(!stopping) {
        byte[] data = new byte[BUFFER_SIZE];
        r.nextBytes(data);
        offset += data.length;
        ChunkImpl evt = new ChunkImpl(type, "random data source", offset, data, this);
        dest.add(evt);
       
      }
    } catch (InterruptedException ie) {}

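Reading chunks back out of local sequence files: each key/value pair is deserialized into a reusable blank ChunkImpl and wrapped in a ByteRange.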
          continue;
       
        SequenceFile.Reader reader = new SequenceFile.Reader(localfs, fstatus.getPath(), conf);

        ChukwaArchiveKey key = new ChukwaArchiveKey();
        ChunkImpl chunk = ChunkImpl.getBlankChunk();

        while (reader.next(key, chunk)) {
          bytes.add(new ByteRange(chunk));
        }
        reader.close();

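Collector-side deserialization: an HTTP post carrying numEvents serialized chunks is read back one ChunkImpl at a time, acknowledged in the response buffer, and queued for the writers.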
      if (FANCY_DIAGNOSTICS) {
        diagnosticPage.sawPost(req.getRemoteHost(), numEvents, currentTime);
      }

      List<Chunk> events = new LinkedList<Chunk>();
      ChunkImpl logEvent = null;
      StringBuilder sb = new StringBuilder();

      for (int i = 0; i < numEvents; i++) {
        // TODO: pass new data to all registered stream handler
        // methods for this chunk's stream
        // TODO: should really have some dynamic assignment of events to writers

        logEvent = ChunkImpl.read(di);
        sb.append("ok:");
        sb.append(logEvent.getData().length);
        sb.append(" bytes ending at offset ");
        sb.append(logEvent.getSeqID() - 1).append("\n");

        events.add(logEvent);

        if (FANCY_DIAGNOSTICS) {
          diagnosticPage.sawChunk(logEvent, i);
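
ChunkImpl implements Writable, so the ChunkImpl.read(di) call above simply reverses a write(). A round-trip sketch over java.io byte-array streams:

      ByteArrayOutputStream bos = new ByteArrayOutputStream();
      logEvent.write(new DataOutputStream(bos));          // serialize
      ChunkImpl copy = ChunkImpl.read(new DataInputStream(
          new ByteArrayInputStream(bos.toByteArray())));  // deserialize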

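An extractRecords() variant that tags each chunk with the source file's timestamp before queueing it.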
   * @return the number of bytes processed
   * @throws InterruptedException
   */
  protected int extractRecords(final ChunkReceiver eq, long buffOffsetInFile,
      byte[] buf, long fileTime) throws InterruptedException {
    final ChunkImpl chunk = new ChunkImpl(type, toWatch.getAbsolutePath(),
        buffOffsetInFile + buf.length, buf, this);
    chunk.addTag("time=\"" + fileTime + "\"");
    log.info("Adding " + toWatch.getAbsolutePath() + " to the queue");
    eq.add(chunk);
    log.info(toWatch.getAbsolutePath() + " added to the queue");
    return buf.length;
  }
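
A consumer can read that tag back. This sketch assumes the Chunk interface offers a getTag(String) lookup returning the quoted value written by addTag() above:

    String time = chunk.getTag("time");
    if (time != null)
      log.info("chunk tagged with time=" + time);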

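Finally, ExecAdaptor wraps the stdout of an executed command in a chunk and, when SPLIT_LINES is set, records the offset of every newline so that each line of output becomes its own record.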
          String stdout = o.getString("stdout");
          data = stdout.getBytes();
        }

        sendOffset += data.length;
        ChunkImpl c = new ChunkImpl(ExecAdaptor.this.type, "results from "
            + cmd, sendOffset, data, ExecAdaptor.this);

        if (SPLIT_LINES) {
          ArrayList<Integer> carriageReturns = new ArrayList<Integer>();
          for (int i = 0; i < data.length; ++i)
            if (data[i] == '\n')
              carriageReturns.add(i);

          c.setRecordOffsets(carriageReturns);
        } // else we get default one record

        dest.add(c);
      } catch (JSONException e) {
        // FIXME: log this somewhere
