Package org.apache.kafka.common.protocol.types

Examples of org.apache.kafka.common.protocol.types.Struct
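The snippets below come from Kafka's request/response classes and client tests, and they all lean on the same small API: a Struct is created for a Schema, fields are set by name with set(), nested records are created with instance(), array fields are stored as Object[], and the finished Struct is sized and serialized with sizeOf() and writeTo(). As a rough orientation, here is a minimal round-trip sketch. The two-level schema is made up purely for illustration (real request and response schemas are looked up through ProtoUtils, as the snippets do), and the Field, Type and ArrayOf constructors are assumed to be the ones in this package.

import java.nio.ByteBuffer;

import org.apache.kafka.common.protocol.types.ArrayOf;
import org.apache.kafka.common.protocol.types.Field;
import org.apache.kafka.common.protocol.types.Schema;
import org.apache.kafka.common.protocol.types.Struct;
import org.apache.kafka.common.protocol.types.Type;

public class StructRoundTripSketch {
    // Illustrative schema only; real Kafka requests and responses use the
    // schemas registered for each ApiKeys entry.
    private static final Schema PARTITION = new Schema(
            new Field("partition", Type.INT32),
            new Field("offset", Type.INT64));
    private static final Schema TOPIC_OFFSETS = new Schema(
            new Field("topic", Type.STRING),
            new Field("partitions", new ArrayOf(PARTITION)));

    public static void main(String[] args) {
        // Write side: fill in fields by name, nested records via instance().
        Struct struct = new Struct(TOPIC_OFFSETS);
        struct.set("topic", "my-topic");
        Struct partition = struct.instance("partitions");
        partition.set("partition", 0);
        partition.set("offset", 42L);
        struct.set("partitions", new Object[] { partition });

        // Serialize: size the buffer, write, then flip for reading.
        ByteBuffer buffer = ByteBuffer.allocate(struct.sizeOf());
        struct.writeTo(buffer);
        buffer.flip();

        // Read side: parse with the same schema and walk the array field.
        Struct parsed = (Struct) TOPIC_OFFSETS.read(buffer);
        System.out.println(parsed.getString("topic"));
        for (Object partitionObj : parsed.getArray("partitions")) {
            Struct p = (Struct) partitionObj;
            System.out.println(p.getInt("partition") + " -> " + p.getLong("offset"));
        }
    }
}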


    /**
     * Constructor for version 0 of the offset commit protocol.
     * @param groupId
     * @param offsetData
     */
    @Deprecated
    public OffsetCommitRequest(String groupId, Map<TopicPartition, PartitionData> offsetData) {
        super(new Struct(ProtoUtils.requestSchema(ApiKeys.OFFSET_COMMIT.id, 0)));
        initCommonFields(groupId, offsetData);
        this.groupId = groupId;
        this.generationId = DEFAULT_GENERATION_ID;
        this.consumerId = DEFAULT_CONSUMER_ID;
        this.offsetData = offsetData;
    }


    /**
     * Constructor for the current version of the offset commit protocol.
     * @param groupId
     * @param generationId
     * @param consumerId
     * @param offsetData
     */
    public OffsetCommitRequest(String groupId, int generationId, String consumerId, Map<TopicPartition, PartitionData> offsetData) {
        super(new Struct(curSchema));

        initCommonFields(groupId, offsetData);
        struct.set(GENERATION_ID_KEY_NAME, generationId);
        struct.set(CONSUMER_ID_KEY_NAME, consumerId);
        this.groupId = groupId;
        this.generationId = generationId;
        this.consumerId = consumerId;
        this.offsetData = offsetData;
    }

    private void initCommonFields(String groupId, Map<TopicPartition, PartitionData> offsetData) {
        Map<String, Map<Integer, PartitionData>> topicsData = CollectionUtils.groupDataByTopic(offsetData);

        struct.set(GROUP_ID_KEY_NAME, groupId);
        List<Struct> topicArray = new ArrayList<Struct>();
        for (Map.Entry<String, Map<Integer, PartitionData>> topicEntry: topicsData.entrySet()) {
            Struct topicData = struct.instance(TOPICS_KEY_NAME);
            topicData.set(TOPIC_KEY_NAME, topicEntry.getKey());
            List<Struct> partitionArray = new ArrayList<Struct>();
            for (Map.Entry<Integer, PartitionData> partitionEntry : topicEntry.getValue().entrySet()) {
                PartitionData fetchPartitionData = partitionEntry.getValue();
                Struct partitionData = topicData.instance(PARTITIONS_KEY_NAME);
                partitionData.set(PARTITION_KEY_NAME, partitionEntry.getKey());
                partitionData.set(COMMIT_OFFSET_KEY_NAME, fetchPartitionData.offset);
                partitionData.set(TIMESTAMP_KEY_NAME, fetchPartitionData.timestamp);
                partitionData.set(METADATA_KEY_NAME, fetchPartitionData.metadata);
                partitionArray.add(partitionData);
            }
            topicData.set(PARTITIONS_KEY_NAME, partitionArray.toArray());
            topicArray.add(topicData);
        }
        struct.set(TOPICS_KEY_NAME, topicArray.toArray());
    }

    public OffsetCommitRequest(Struct struct) {
        super(struct);
        offsetData = new HashMap<TopicPartition, PartitionData>();
        for (Object topicResponseObj : struct.getArray(TOPICS_KEY_NAME)) {
            Struct topicResponse = (Struct) topicResponseObj;
            String topic = topicResponse.getString(TOPIC_KEY_NAME);
            for (Object partitionResponseObj : topicResponse.getArray(PARTITIONS_KEY_NAME)) {
                Struct partitionResponse = (Struct) partitionResponseObj;
                int partition = partitionResponse.getInt(PARTITION_KEY_NAME);
                long offset = partitionResponse.getLong(COMMIT_OFFSET_KEY_NAME);
                long timestamp = partitionResponse.getLong(TIMESTAMP_KEY_NAME);
                String metadata = partitionResponse.getString(METADATA_KEY_NAME);
                PartitionData partitionData = new PartitionData(offset, timestamp, metadata);
                offsetData.put(new TopicPartition(topic, partition), partitionData);
            }
        }
        groupId = struct.getString(GROUP_ID_KEY_NAME);
        generationId = struct.getInt(GENERATION_ID_KEY_NAME);
        consumerId = struct.getString(CONSUMER_ID_KEY_NAME);
    }
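The two OffsetCommitRequest constructors above are the write path and the read path over the same schema. A hedged usage sketch of the write side follows; it assumes PartitionData is the public nested class of OffsetCommitRequest whose (offset, timestamp, metadata) constructor appears in the parsing constructor above.

import java.util.HashMap;
import java.util.Map;

import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.common.requests.OffsetCommitRequest;

public class OffsetCommitRequestSketch {
    public static void main(String[] args) {
        Map<TopicPartition, OffsetCommitRequest.PartitionData> offsets =
                new HashMap<TopicPartition, OffsetCommitRequest.PartitionData>();
        // offset 42, commit timestamp "now", free-form metadata string
        offsets.put(new TopicPartition("my-topic", 0),
                new OffsetCommitRequest.PartitionData(42L, System.currentTimeMillis(), "checkpoint"));

        // Group, generation and consumer id are set alongside the per-partition data;
        // the Struct-based constructor above is the inverse of this call.
        OffsetCommitRequest request =
                new OffsetCommitRequest("my-group", 1, "consumer-1", offsets);
    }
}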

    private final short acks;
    private final int timeout;
    private final Map<TopicPartition, ByteBuffer> partitionRecords;

    public ProduceRequest(short acks, int timeout, Map<TopicPartition, ByteBuffer> partitionRecords) {
        super(new Struct(curSchema));
        Map<String, Map<Integer, ByteBuffer>> recordsByTopic = CollectionUtils.groupDataByTopic(partitionRecords);
        struct.set(ACKS_KEY_NAME, acks);
        struct.set(TIMEOUT_KEY_NAME, timeout);
        List<Struct> topicDatas = new ArrayList<Struct>(recordsByTopic.size());
        for (Map.Entry<String, Map<Integer, ByteBuffer>> entry : recordsByTopic.entrySet()) {
            Struct topicData = struct.instance(TOPIC_DATA_KEY_NAME);
            topicData.set(TOPIC_KEY_NAME, entry.getKey());
            List<Struct> partitionArray = new ArrayList<Struct>();
            for (Map.Entry<Integer, ByteBuffer> partitionEntry : entry.getValue().entrySet()) {
                ByteBuffer buffer = partitionEntry.getValue().duplicate();
                Struct part = topicData.instance(PARTITION_DATA_KEY_NAME)
                                       .set(PARTITION_KEY_NAME, partitionEntry.getKey())
                                       .set(RECORD_SET_KEY_NAME, buffer);
                partitionArray.add(part);
            }
            topicData.set(PARTITION_DATA_KEY_NAME, partitionArray.toArray());
            topicDatas.add(topicData);
        }
        struct.set(TOPIC_DATA_KEY_NAME, topicDatas.toArray());
        this.acks = acks;
        this.timeout = timeout;
        this.partitionRecords = partitionRecords;
    }

    public ProduceRequest(Struct struct) {
        super(struct);
        partitionRecords = new HashMap<TopicPartition, ByteBuffer>();
        for (Object topicDataObj : struct.getArray(TOPIC_DATA_KEY_NAME)) {
            Struct topicData = (Struct) topicDataObj;
            String topic = topicData.getString(TOPIC_KEY_NAME);
            for (Object partitionResponseObj : topicData.getArray(PARTITION_DATA_KEY_NAME)) {
                Struct partitionResponse = (Struct) partitionResponseObj;
                int partition = partitionResponse.getInt(PARTITION_KEY_NAME);
                ByteBuffer records = partitionResponse.getBytes(RECORD_SET_KEY_NAME);
                partitionRecords.put(new TopicPartition(topic, partition), records);
            }
        }
        acks = struct.getShort(ACKS_KEY_NAME);
        timeout = struct.getInt(TIMEOUT_KEY_NAME);
    }
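ProduceRequest follows the same pattern: the first constructor flattens the per-partition record buffers into nested topic/partition arrays, and the Struct-based constructor reverses it. A small hedged sketch of the write side; the payload bytes are a placeholder, not a valid message set.

import java.nio.ByteBuffer;
import java.util.HashMap;
import java.util.Map;

import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.common.requests.ProduceRequest;

public class ProduceRequestSketch {
    public static void main(String[] args) {
        Map<TopicPartition, ByteBuffer> records = new HashMap<TopicPartition, ByteBuffer>();
        // Placeholder payload; in the client this buffer holds a serialized message set.
        records.put(new TopicPartition("my-topic", 0), ByteBuffer.wrap(new byte[] { 1, 2, 3 }));

        // acks = 1 (leader acknowledgement only), 30 second timeout
        ProduceRequest request = new ProduceRequest((short) 1, 30000, records);
    }
}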

    private static final String ERROR_CODE_KEY_NAME = "error_code";

    private final Map<TopicPartition, Short> responseData;

    public OffsetCommitResponse(Map<TopicPartition, Short> responseData) {
        super(new Struct(curSchema));

        Map<String, Map<Integer, Short>> topicsData = CollectionUtils.groupDataByTopic(responseData);

        List<Struct> topicArray = new ArrayList<Struct>();
        for (Map.Entry<String, Map<Integer, Short>> entries: topicsData.entrySet()) {
            Struct topicData = struct.instance(RESPONSES_KEY_NAME);
            topicData.set(TOPIC_KEY_NAME, entries.getKey());
            List<Struct> partitionArray = new ArrayList<Struct>();
            for (Map.Entry<Integer, Short> partitionEntry : entries.getValue().entrySet()) {
                Struct partitionData = topicData.instance(PARTITIONS_KEY_NAME);
                partitionData.set(PARTITION_KEY_NAME, partitionEntry.getKey());
                partitionData.set(ERROR_CODE_KEY_NAME, partitionEntry.getValue());
                partitionArray.add(partitionData);
            }
            topicData.set(PARTITIONS_KEY_NAME, partitionArray.toArray());
            topicArray.add(topicData);
        }
        struct.set(RESPONSES_KEY_NAME, topicArray.toArray());
        this.responseData = responseData;
    }

    public OffsetCommitResponse(Struct struct) {
        super(struct);
        responseData = new HashMap<TopicPartition, Short>();
        for (Object topicResponseObj : struct.getArray(RESPONSES_KEY_NAME)) {
            Struct topicResponse = (Struct) topicResponseObj;
            String topic = topicResponse.getString(TOPIC_KEY_NAME);
            for (Object partitionResponseObj : topicResponse.getArray(PARTITIONS_KEY_NAME)) {
                Struct partitionResponse = (Struct) partitionResponseObj;
                int partition = partitionResponse.getInt(PARTITION_KEY_NAME);
                short errorCode = partitionResponse.getShort(ERROR_CODE_KEY_NAME);
                responseData.put(new TopicPartition(topic, partition), errorCode);
            }
        }
    }

        // "send" (the serialized request) and "reqHeader" are created earlier in the test.
        ClientRequest request = new ClientRequest(time.milliseconds(), true, send, null);
        awaitReady(client, node);
        client.poll(Arrays.asList(request), 1, time.milliseconds());
        assertEquals(1, client.inFlightRequestCount());
        ResponseHeader respHeader = new ResponseHeader(reqHeader.correlationId());
        Struct resp = new Struct(ProtoUtils.currentResponseSchema(ApiKeys.PRODUCE.id));
        resp.set("responses", new Object[0]);
        int size = respHeader.sizeOf() + resp.sizeOf();
        ByteBuffer buffer = ByteBuffer.allocate(size);
        respHeader.writeTo(buffer);
        resp.writeTo(buffer);
        buffer.flip();
        selector.completeReceive(new NetworkReceive(node.id(), buffer));
        List<ClientResponse> responses = client.poll(new ArrayList<ClientRequest>(), 1, time.milliseconds());
        assertEquals(1, responses.size());
        ClientResponse response = responses.get(0);
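The test above frames a response the way a broker would: a ResponseHeader followed by the body Struct, written into a single buffer. Below is a hedged sketch of the inverse direction, assuming ResponseHeader.parse(ByteBuffer) from the requests package and that Schema.read(ByteBuffer) parses a Struct back out of the buffer.

import java.nio.ByteBuffer;

import org.apache.kafka.common.protocol.ApiKeys;
import org.apache.kafka.common.protocol.ProtoUtils;
import org.apache.kafka.common.protocol.types.Struct;
import org.apache.kafka.common.requests.ResponseHeader;

public class ResponseFrameSketch {
    // Splits a framed produce response buffer back into its header and body Struct.
    public static Struct parseProduceResponse(ByteBuffer buffer) {
        ResponseHeader header = ResponseHeader.parse(buffer);  // consumes the header fields
        Struct body = (Struct) ProtoUtils.currentResponseSchema(ApiKeys.PRODUCE.id).read(buffer);
        System.out.println("correlation id: " + header.correlationId());
        return body;
    }
}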

            assertEquals(error.exception().getClass(), e.getCause().getClass());
        }
    }

    private Struct produceResponse(String topic, int part, long offset, int error) {
        Struct struct = new Struct(ProtoUtils.currentResponseSchema(ApiKeys.PRODUCE.id));
        Struct response = struct.instance("responses");
        response.set("topic", topic);
        Struct partResp = response.instance("partition_responses");
        partResp.set("partition", part);
        partResp.set("error_code", (short) error);
        partResp.set("base_offset", offset);
        response.set("partition_responses", new Object[] { partResp });
        struct.set("responses", new Object[] { response });
        return struct;
    }
