Package com.linkedin.camus.etl.kafka.common

Examples of com.linkedin.camus.etl.kafka.common.EtlKey
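EtlKey is the key type Camus writes alongside Kafka messages and committed offsets. The snippets below exercise its default constructor, its copy constructor, a five-argument constructor, and accessors for topic, leader id, partition, offset, message size, and time. As a minimal sketch, using only the constructors and accessors that appear in these examples (the fourth constructor argument is assumed to be a begin offset, matching the literal 0 passed in the examples below):

    // Hypothetical sketch based only on the calls shown in the examples on this page.
    EtlKey key = new EtlKey("myTopic", "0", 3, 0L, 42L); // topic, leaderId, partition, beginOffset (assumed), offset
    key.setTime(System.currentTimeMillis());
    key.setMessageSize(1024L);

    EtlKey copy = new EtlKey(key); // copy constructor, as used in addOffset(...) below
    String topicPart = copy.getTopic() + "-" + copy.getLeaderId() + "-" + copy.getPartition();
    long resumeFrom = copy.getOffset();

The first example records per-partition offsets and keeps a running average of message size: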


        addOffset(key);
    }

    public void addOffset(EtlKey key) {
        String topicPart = key.getTopic() + "-" + key.getLeaderId() + "-" + key.getPartition();
        EtlKey offsetKey = new EtlKey(key);

        if (offsets.containsKey(topicPart)) {
            // Fold the new message size into a running average for this topic/leader/partition.
            long avgSize = offsets.get(topicPart).getMessageSize() * eventCounts.get(topicPart) + key.getMessageSize();
            avgSize /= eventCounts.get(topicPart) + 1;
            offsetKey.setMessageSize(avgSize);
        } else {
            eventCounts.put(topicPart, 0L);
        }
        eventCounts.put(topicPart, eventCounts.get(topicPart) + 1);
        offsets.put(topicPart, offsetKey);
    }
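For example, if the stored average for a topic-partition is 100 bytes over 4 recorded events and a 200-byte message arrives, the new average becomes (100 * 4 + 200) / 5 = 120 bytes; the division is integer division, since the sizes are longs.

The next example reconciles previously committed offsets with the range of offsets each request can actually serve: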


    for (CamusRequest request : finalRequests) {
      if (moveLatest.contains(request.getTopic()) || moveLatest.contains("all")) {
        log.info("Moving to latest for topic: " + request.getTopic());
        // TODO: factor out kafka specific request functionality
        EtlKey oldKey = offsetKeys.get(request);
        EtlKey newKey = new EtlKey(request.getTopic(), ((EtlRequest) request).getLeaderId(),
            request.getPartition(), 0, request.getLastOffset());

        // Carry over the previously observed average message size, if any.
        if (oldKey != null)
          newKey.setMessageSize(oldKey.getMessageSize());

        offsetKeys.put(request, newKey);
      }

      EtlKey key = offsetKeys.get(request);

      if (key != null) {
        request.setOffset(key.getOffset());
        request.setAvgMsgSize(key.getMessageSize());
      }

      // Reset to the earliest available offset when the stored offset falls outside
      // the range the broker can serve.
      if (request.getEarliestOffset() > request.getOffset()
          || request.getOffset() > request.getLastOffset()) {
        if (request.getEarliestOffset() > request.getOffset()) {
          log.error("The earliest offset was found to be more than the current offset: " + request);
        } else {
          log.error("The current offset was found to be more than the latest offset: " + request);
        }
        log.error("Moving to the earliest offset available");
        request.setOffset(request.getEarliestOffset());
        // TODO: factor out kafka specific request functionality
        offsetKeys.put(request,
            new EtlKey(request.getTopic(), ((EtlRequest) request).getLeaderId(),
                request.getPartition(), 0, request.getOffset()));
      }
      log.info(request);
    }
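Taken together, these checks leave every request with a usable starting point: topics flagged to move to latest are pointed at their last offset (keeping any previously observed average message size), and any stored offset outside the range the broker can serve is reset to the earliest available offset. The offsets themselves are recovered from earlier runs' offset files, as the next example shows: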

      FileSystem fs = input.getFileSystem(context.getConfiguration());
      for (FileStatus f : fs.listStatus(input, new OffsetFileFilter())) {
        log.info("previous offset file:" + f.getPath().toString());
        SequenceFile.Reader reader = new SequenceFile.Reader(fs, f.getPath(), context.getConfiguration());
        EtlKey key = new EtlKey();
        while (reader.next(key, NullWritable.get())) {
          // TODO: factor out kafka specific request functionality
          CamusRequest request = new EtlRequest(context, key.getTopic(), key.getLeaderId(), key.getPartition());
          // Keep only the highest committed offset seen for each topic/leader/partition.
          if (offsetKeysMap.containsKey(request)) {
            EtlKey oldKey = offsetKeysMap.get(request);
            if (oldKey.getOffset() < key.getOffset()) {
              offsetKeysMap.put(request, key);
            }
          } else {
            offsetKeysMap.put(request, key);
          }
          key = new EtlKey();
        }
        reader.close();
      }
    }
    return offsetKeysMap;
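Since only the largest offset per topic/leader/partition survives into offsetKeysMap, a new run resumes from the furthest point any earlier execution committed, even when several old offset files are present. The remaining examples show EtlKey in a partitioner test that fixes the key's timestamp, and as the key for error records read back from a sequence file.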

        assertTrue(actualResult.equals(expectedResult));
    }

    @Test
    public void testEncodedPartition() throws IOException {
        EtlKey testEtlKey = new EtlKey();
        testEtlKey.setTime(1400549463000L);
        Configuration testConfiguration = new Configuration();
        Job testJob = new Job(new Configuration());

        DefaultPartitioner testPartitioner = new DefaultPartitioner();
        testPartitioner.setConf(testConfiguration);

      SequenceFile.Reader reader = new SequenceFile.Reader(fs, f.getPath(), fs.getConf());

      String errorFrom = "\nError from file [" + f.getPath() + "]";

      EtlKey key = new EtlKey();
      ExceptionWritable value = new ExceptionWritable();

      // Append the source file to each exception message so errors can be traced back to their file.
      while (reader.next(key, value)) {
        ExceptionWritable exceptionWritable = new ExceptionWritable(value.toString() + errorFrom);
        errors.put(new EtlKey(key), exceptionWritable);
      }
      reader.close();
    }

    return errors;


