Package kafka.message

Examples of kafka.message.Message


    // Build a batch of _count Kafka messages; each payload is a random
    // non-negative timestamp rendered as a decimal string in UTF-8.
    List<Message> list = new ArrayList<Message>();
    for (int i = 0; i < _count; i++) {
      Long timestamp = RANDOM.nextLong();
      // Clamp to non-negative. NOTE(review): Long.MIN_VALUE negates to itself,
      // so a negative timestamp can still slip through on that one value.
      if (timestamp < 0) timestamp = -timestamp;
      // "UTF8" is a legacy charset alias; the canonical name is "UTF-8".
      byte[] bytes = timestamp.toString().getBytes("UTF8");
      Message message = new Message(bytes);
      list.add(message);
    }
    // send events: log how many messages are about to go out for this topic
    System.out.println(" send " + list.size() + " " + _topic
        + " count events to " + producerId);
View Full Code Here


    // Endless producer loop: each iteration sends one batch of batchSize
    // zero-filled messages of messageSize bytes to a random partition, and
    // accumulates the number of payload bytes sent.
    while(true)
    {
      List<Message> messageList = new ArrayList<Message>();
      for(int i = 0; i < batchSize; i++)
      {
        // Payload is an all-zero byte array; only size matters for this tool.
        Message message = new Message(new byte[messageSize]);
        messageList.add(message);
      }
      ByteBufferMessageSet set = new ByteBufferMessageSet(messageList);
      producer.send(topic, random.nextInt(numParts), set);
      // NOTE(review): batchSize * messageSize is an int multiply and may
      // overflow before being widened for getAndAdd.
      bytesSent.getAndAdd(batchSize * messageSize);
 
View Full Code Here

        // Walk the fetched message set, tracking each message's byte offset
        // within the set and bumping per-topic counters as we go.
        Iterator<Message> iter = (Iterator<Message>) messages
            .iterator();
        long messageOffset = 0;
        while (iter.hasNext()) {
          Message message = iter.next();

          messageOffset += MessageSet.entrySize(message);
          reporter.incrCounter("topic-counters", _topic, 1);
          _count++;

          try {
            // Time the timestamp-decoding step so decodeTime reflects
            // cumulative decode cost across all messages.
            tempTime = System.currentTimeMillis();
            _timestamp = getTimestamp(message);
            decodeTime += (System.currentTimeMillis() - tempTime);

          } catch (IOException e) {
            System.err.println("SetOffset=" + _offset
                + "messageOffset=" + messageOffset
                + ": ignore message with exception: ");

            if (_ignoreErrors) {
              // Best-effort mode: count the parse failure and skip the message.
              reporter.incrCounter(_topic, _topic
                  + "_PARSING_ERROR", 1);
              continue;
            } else {
              // Strict mode: surface the failure to the caller.
              e.printStackTrace(System.err);
              throw e;
            }
          }

          // determine whether to stop
          Status status = getStatus(message, reporter);

          // generate output
          switch (status) {
          case OUTPUT_AND_CONTINUE:
          case OUTPUT_AND_BREAK:
            tempTime = System.currentTimeMillis();
            ByteBuffer buffer = message.payload();
            byte[] bytes = new byte[buffer.remaining()];
            // NOTE(review): ByteBuffer.get(dst, offset, length) takes an
            // offset into the DESTINATION array, not the buffer's position.
            // Passing buffer.position() here throws IndexOutOfBoundsException
            // whenever position > 0; the offset argument should be 0.
            buffer.get(bytes, buffer.position(), bytes.length);
            collector.collect(new KafkaETLKey(_timestamp,
                _granularity), new BytesWritable(bytes));
            outputTime += (System.currentTimeMillis() - tempTime);
View Full Code Here

    // Reset per-key state, then rebuild a Message from each value's bytes and
    // emit a generated key/value pair for every message the filter accepts.
    reset(key, collector, reporter);

    while (values.hasNext()) {
      // NOTE(review): BytesWritable.get() returns the backing array, which may
      // be longer than getLength(); trailing padding bytes end up inside the
      // Message unless the array is trimmed to the valid length first.
      byte[] bytes = values.next().get();

      Message message = new Message(bytes);

      // filter(...) returning true means "drop this message".
      if (!filter(key, message, reporter))
        collector.collect(generateOutputKey(key, message),
            generateOutputValue(key, message));
View Full Code Here

  /**
   * Producer thread body: endlessly sends single-message batches whose
   * payload is the string "Message_&lt;n&gt;" for a monotonically increasing
   * counter n. (Fragment — loop body continues beyond this excerpt.)
   */
  public void run() {
    int messageNo = 1;
    while(true)
    {
      // NOTE(review): the new String(...) wrapper is redundant, and
      // getBytes() without a charset uses the platform default encoding.
      String messageStr = new String("Message_" + messageNo);
      Message message = new Message(messageStr.getBytes());
      List<Message> messageList = new ArrayList<Message>();
      messageList.add(message);
      ByteBufferMessageSet set = new ByteBufferMessageSet(messageList);
      producer.send(topic, set);
      messageNo++;
View Full Code Here

       
        
        // Rebuild the Kafka message from the value's raw bytes.
        byte[] bytes = KafkaETLUtils.getBytes(val);
       
        //check the checksum of message against the checksum recorded in the key;
        //a mismatch indicates corruption and aborts this record
        Message message = new Message(ByteBuffer.wrap(bytes));
        long checksum = key.getChecksum();
        if (checksum != message.checksum())
            throw new IOException ("Invalid message checksum "
                                            + message.checksum() + ". Expected " + key + ".");
        Text data = getData (message);
        _count ++; // running total of successfully verified messages
          
        // Emit the message payload keyed by its sequence number.
        collector.collect(new LongWritable (_count), data);
View Full Code Here

    }

    /**
     * Advance to the next key/value pair. When the current reader is
     * exhausted (or absent), pops the next EtlRequest from the split and
     * opens a fresh KafkaReader for it; returns false when no requests
     * remain. (Fragment — the method body continues beyond this excerpt.)
     *
     * NOTE(review): the ChecksumException construction below is missing a
     * '+' concatenation operator before key.getOffset() and will not
     * compile as written.
     */
    @Override
    public boolean nextKeyValue() throws IOException, InterruptedException {

        Message message = null;

        while (true) {
            try {
                if (reader == null || !reader.hasNext()) {
                    // Current reader drained: fetch the next pull request.
                    EtlRequest request = (EtlRequest) split.popRequest();
                    if (request == null) {
                        return false;
                    }

                    if (maxPullHours > 0) {
                        endTimeStamp = 0;
                    }

                    key.set(request.getTopic(), request.getLeaderId(), request.getPartition(),
                            request.getOffset(), request.getOffset(), 0);
                    value = null;
                    log.info("\n\ntopic:" + request.getTopic() + " partition:"
                            + request.getPartition() + " beginOffset:" + request.getOffset()
                            + " estimatedLastOffset:" + request.getLastOffset());

                    // Append this topic:leader:partition to the task status line.
                    statusMsg += statusMsg.length() > 0 ? "; " : "";
                    statusMsg += request.getTopic() + ":" + request.getLeaderId() + ":"
                            + request.getPartition();
                    context.setStatus(statusMsg);

                    if (reader != null) {
                        closeReader();
                    }
                    reader = new KafkaReader(context, request,
                            CamusJob.getKafkaTimeoutValue(mapperContext),
                            CamusJob.getKafkaBufferSize(mapperContext));

                    decoder = MessageDecoderFactory.createMessageDecoder(context, request.getTopic());
                }
                int count = 0;
                while (reader.getNext(key, msgValue, msgKey)) {
                    readBytes += key.getMessageSize();
                    count++;
                    context.progress();
                    mapperContext.getCounter("total", "data-read").increment(msgValue.getLength());
                    mapperContext.getCounter("total", "event-count").increment(1);
                    byte[] bytes = getBytes(msgValue);
                    byte[] keyBytes = getBytes(msgKey);
                    // check the checksum of message.
                    // If message has partition key, need to construct it with Key for checkSum to match
                    Message messageWithKey = new Message(bytes,keyBytes);
                    Message messageWithoutKey = new Message(bytes);
                    long checksum = key.getChecksum();
                    if (checksum != messageWithKey.checksum() && checksum != messageWithoutKey.checksum()) {
                      // NOTE(review): missing '+' before key.getOffset() on the
                      // last argument line — this is a compile error.
                      throw new ChecksumException("Invalid message checksum : MessageWithKey : "
                              + messageWithKey.checksum() + " MessageWithoutKey checksum : "
                          + messageWithoutKey.checksum()
                          + ". Expected " + key.getChecksum()
                          key.getOffset());
                    }

                    long tempTime = System.currentTimeMillis();
View Full Code Here

   */
  /**
   * Copy the next message's payload and (optional) partition key into the
   * supplied writables and update the ETL key with topic/partition/offset
   * bookkeeping. (Fragment — the method body continues beyond this excerpt.)
   *
   * NOTE(review): both buf.get(bytes, buf.position(), origSize) calls below
   * pass the buffer position as the DESTINATION-array offset; that argument
   * should be 0, and a non-zero position triggers IndexOutOfBoundsException.
   */
  public boolean getNext(EtlKey key, BytesWritable payload ,BytesWritable pKey) throws IOException {
    if (hasNext()) {

      MessageAndOffset msgAndOffset = messageIter.next();
      Message message = msgAndOffset.message();

      // Copy the payload bytes out of the (possibly shared) ByteBuffer.
      ByteBuffer buf = message.payload();
      int origSize = buf.remaining();
      byte[] bytes = new byte[origSize];
      buf.get(bytes, buf.position(), origSize);
      payload.set(bytes, 0, origSize);

      // The partition key is optional; copy it only when present.
      buf = message.key();
      if(buf != null){
        origSize = buf.remaining();
        bytes = new byte[origSize];
        buf.get(bytes, buf.position(), origSize);
        pKey.set(bytes, 0, origSize);
      }

      // Record where this message came from and where the next read starts.
      key.clear();
      key.set(kafkaRequest.getTopic(), kafkaRequest.getLeaderId(),
          kafkaRequest.getPartition(), currentOffset,
          msgAndOffset.offset() + 1, message.checksum());
     
      key.setMessageSize(msgAndOffset.message().size());

      currentOffset = msgAndOffset.offset() + 1; // increase offset
      currentCount++; // increase count
View Full Code Here

        }

        // Pull the next message from the in-memory iterator. The emitted key
        // is the watermark minus the message size minus 4 (the wire-format
        // size prefix); the value is the raw payload bytes.
        if (iterator.hasNext())
        {
            MessageAndOffset messageOffset = iterator.next();
            Message message = messageOffset.message();
            key.set(watermark - message.size() - 4);
            // NOTE(review): payload().array() requires a heap buffer with an
            // accessible backing array; a direct/read-only buffer throws here.
            value.set(message.payload().array(), message.payload().arrayOffset(), message.payloadSize());
            numProcessedMessages++;
            if (!iterator.hasNext())
            {
                // Message set exhausted: drop references so the next call
                // knows to fetch a fresh batch.
                messages = null;
                iterator = null;
View Full Code Here

       
        
        // Rebuild the Kafka message directly from the value's raw bytes.
        byte[] bytes = KafkaETLUtils.getBytes(val);
       
        //check the checksum of message against the checksum recorded in the key;
        //a mismatch indicates corruption and aborts this record
        Message message = new Message(bytes);
        long checksum = key.getChecksum();
        if (checksum != message.checksum())
            throw new IOException ("Invalid message checksum "
                                            + message.checksum() + ". Expected " + key + ".");
        Text data = getData (message);
        _count ++; // running total of successfully verified messages
          
        // Emit the message payload keyed by its sequence number.
        collector.collect(new LongWritable (_count), data);
View Full Code Here

TOP

Related Classes of kafka.message.Message

Copyright © 2018 www.massapi.com. All rights reserved.
All source code is the property of its respective owners. Java is a trademark of Sun Microsystems, Inc. and is owned by Oracle, Inc. Contact coftware@gmail.com.