
I am using a Kafka SimpleConsumer. All I want is for it to fetch messages from the producer, have the data processed in Storm, and then save the results to Cassandra. Everything works, but whenever I increase the maxReads value, the Kafka consumer goes into an infinite loop, and the data processing and the save to Cassandra never happen. So my question is: what does the maxReads variable mean here? And how can I make the consumer work like this: when the producer sends messages, it fetches them and hands the tuples to the Storm bolt; when the producer stops, the consumer halts; and if the producer sends messages again after some time, it consumes them, passes them to the Storm bolt, and halts again, and so on.

Here is my Kafka consumer:

package com.sethiyaji.kafka;

import java.nio.ByteBuffer;
import java.util.ArrayList;
import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

import kafka.api.FetchRequest;
import kafka.api.FetchRequestBuilder;
import kafka.api.OffsetRequest;
import kafka.api.PartitionOffsetRequestInfo;
import kafka.common.ErrorMapping;
import kafka.common.TopicAndPartition;
import kafka.javaapi.FetchResponse;
import kafka.javaapi.OffsetResponse;
import kafka.javaapi.PartitionMetadata;
import kafka.javaapi.TopicMetadata;
import kafka.javaapi.TopicMetadataRequest;
import kafka.javaapi.TopicMetadataResponse;
import kafka.javaapi.consumer.SimpleConsumer;
import kafka.message.MessageAndOffset;

public class ConsumerKafka {
    private List<String> m_replicaBrokers;

    public ConsumerKafka() {
        m_replicaBrokers = new ArrayList<String>();
    }

    public void run(long maxReads, String topic, int partition, List<String> seedBrokers, int port) throws Exception{
        PartitionMetadata partitionMetaData = findLeader(seedBrokers,port,topic,partition);
        if(partitionMetaData == null){
            System.out.println("Metadata not found");
            return;
        }
        if(partitionMetaData.leader() == null){
            System.out.println("Leader Not Found");
            return;
        }
        String leadBroker = partitionMetaData.leader().host();
        String clientName = "Client_"+topic+"_"+partition;

        SimpleConsumer simpleConsumer = new SimpleConsumer(leadBroker, port, 100000, 64*1024, clientName);
        // Begin from the earliest offset available in the partition.
        long readOffset = getLastOffset(simpleConsumer,topic,partition,OffsetRequest.EarliestTime(),clientName);
        int numErrors = 0;
        // Loop until maxReads messages have been consumed in total.
        while(maxReads > 0){
            if(simpleConsumer == null){
                simpleConsumer = new SimpleConsumer(leadBroker, port, 100000, 64*1024, clientName);
            }
            // Fetch up to 100000 bytes starting at the current read offset.
            FetchRequest fetchRequest = new FetchRequestBuilder().clientId(clientName).addFetch(topic, partition, readOffset, 100000).build();
            FetchResponse fetchResponse = simpleConsumer.fetch(fetchRequest);

            if(fetchResponse.hasError()){
                numErrors++;
                short code = fetchResponse.errorCode(topic, partition);
                if(numErrors > 5) break;
                if(code == ErrorMapping.OffsetOutOfRangeCode()){
                    // We asked for an invalid offset; reset to the latest one.
                    readOffset = getLastOffset(simpleConsumer,topic,partition,OffsetRequest.LatestTime(),clientName);
                    continue;
                }
                // Any other error: assume the leader moved and look it up again.
                simpleConsumer.close();
                simpleConsumer = null;
                leadBroker = findNewLeader(leadBroker,topic,partition,port);
                continue;
            }
            numErrors = 0;

            long numRead = 0;
            for(MessageAndOffset messageAndOffset: fetchResponse.messageSet(topic, partition)){
                long currentOffset = messageAndOffset.offset();
                if(currentOffset < readOffset){
                    // A fetch can return already-seen offsets (e.g. inside a
                    // compressed message block), so skip anything old.
                    System.out.println("Found old offset: "+currentOffset+" expecting: "+readOffset);
                    continue;
                }
                readOffset = messageAndOffset.nextOffset();
                ByteBuffer payload = messageAndOffset.message().payload();
                byte[] bytes = new byte[payload.limit()];
                payload.get(bytes);
                System.out.println(messageAndOffset.offset()+":"+new String(bytes,"UTF-8"));
                numRead++;
                maxReads--;
            }
            if(numRead == 0){
                // Nothing was fetched; wait a second before polling again.
                try{
                    Thread.sleep(1000);
                }catch(InterruptedException e){
                    System.out.println("Error:"+e);
                }
            }
        }
        if(simpleConsumer != null) simpleConsumer.close();
    }

    public long getLastOffset(SimpleConsumer consumer, String topic, int partition,long whichTime,String clientName){
            TopicAndPartition topicAndPartition = new TopicAndPartition(topic,partition);
            Map<TopicAndPartition,PartitionOffsetRequestInfo> requestInfo = new HashMap<TopicAndPartition,PartitionOffsetRequestInfo>();
            requestInfo.put(topicAndPartition, new PartitionOffsetRequestInfo(whichTime, 1));
            kafka.javaapi.OffsetRequest offsetRequest = new kafka.javaapi.OffsetRequest(requestInfo,OffsetRequest.CurrentVersion(), clientName);
            OffsetResponse offsetResponse = consumer.getOffsetsBefore(offsetRequest);
            if(offsetResponse.hasError()){
                System.out.println("Error fetching offset data from the broker. Reason: "+offsetResponse.errorCode(topic,partition));
                return 0;
            }
            long[] offsets = offsetResponse.offsets(topic,partition);
            return offsets[0];
    }
    private String findNewLeader(String oldLeader,String topic,int partition,int port)throws Exception{
        for(int i=0 ; i< 3;i++){
            boolean goToSleep=false;
            PartitionMetadata metaData=findLeader(m_replicaBrokers,port,topic,partition);
            if(metaData == null){
                goToSleep=true;
            } else if(metaData.leader()==null){
                goToSleep=true;
            } else if(oldLeader.equalsIgnoreCase(metaData.leader().host()) && i==0){
                // On the first pass the old leader may not have failed over
                // yet, so give ZooKeeper a second before retrying.
                goToSleep=true;
            } else{
                return metaData.leader().host();
            }

            if(goToSleep){
                try{
                    Thread.sleep(1000);
                }catch(InterruptedException e){
                    System.out.println("Error:"+e);
                }
            }
        }
        System.out.println("Unable to find new leader after broker failure. Exiting.");
        throw new Exception("Unable to find new leader after broker failure. Exiting.");
    }
    private PartitionMetadata findLeader(List<String> seedBrokers,int port,String topic,int partition){
        PartitionMetadata returnMetadata=null;
        loop:
            for(String seed: seedBrokers){
                SimpleConsumer consumer=null;
                try{
                    consumer=new SimpleConsumer(seed,port,100000,64*1024,"id7");
                    List<String> topicsList= Collections.singletonList(topic);
                    TopicMetadataRequest request = new TopicMetadataRequest(topicsList);
                    TopicMetadataResponse response = consumer.send(request);
                    List<TopicMetadata> metaDataList= response.topicsMetadata();
                    for(TopicMetadata item: metaDataList){
                        for(PartitionMetadata part:item.partitionsMetadata()){
                            if(part.partitionId() == partition){
                                returnMetadata = part;
                                break loop;
                            }
                        }
                    }
                } catch(Exception e){
                    System.out.println("Error communicating with broker ["+seed+"] to find leader for ["+topic+", "+partition+"]. Reason: "+e);
                } finally{
                    if(consumer != null) consumer.close();
                }
            }
        // Collect the replica brokers for findNewLeader(); the null check
        // covers the case where no metadata was found for the partition.
        if(returnMetadata != null){
            for(kafka.cluster.Broker replica: returnMetadata.replicas()){
                m_replicaBrokers.add(replica.host());
            }
        }
        return returnMetadata;
    }
}
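
In other words, instead of counting down maxReads I want the fetch loop to run indefinitely, along these lines (an illustrative fragment only, reusing the variables and fetch logic from run() above, with the error handling omitted):

// Sketch: poll forever instead of counting down maxReads, sleeping
// whenever a fetch returns no new messages.
while (true) {
    FetchRequest fetchRequest = new FetchRequestBuilder()
            .clientId(clientName)
            .addFetch(topic, partition, readOffset, 100000)
            .build();
    FetchResponse fetchResponse = simpleConsumer.fetch(fetchRequest);

    long numRead = 0;
    for (MessageAndOffset messageAndOffset : fetchResponse.messageSet(topic, partition)) {
        readOffset = messageAndOffset.nextOffset();
        // ... hand the message payload to the Storm bolt here ...
        numRead++;
    }
    if (numRead == 0) {
        Thread.sleep(1000); // idle until the producer sends again
    }
}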
    If my understanding is correct, you are trying to use Kafka with Apache Storm. I would suggest taking a look at [Kafka Spout](https://github.com/HolmesNL/kafka-spout/wiki) – user2720864 Jan 09 '15 at 13:20

1 Answer


I am not sure about the particular problem in your SimpleConsumer, but for your use case a proper topology with spouts (in this case a Kafka spout) and bolts would be a better fit.
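
For illustration, a skeleton of such a topology might look like the sketch below. Note this is only a sketch: it assumes the storm-kafka KafkaSpout that ships with Storm 0.9.x, the ZooKeeper address and topic name are placeholders, and CassandraWriterBolt stands in for a bolt you would implement yourself to write tuples to Cassandra.

import backtype.storm.Config;
import backtype.storm.LocalCluster;
import backtype.storm.spout.SchemeAsMultiScheme;
import backtype.storm.topology.TopologyBuilder;
import storm.kafka.BrokerHosts;
import storm.kafka.KafkaSpout;
import storm.kafka.SpoutConfig;
import storm.kafka.StringScheme;
import storm.kafka.ZkHosts;

public class KafkaStormTopology {
    public static void main(String[] args) throws Exception {
        // The spout reads from Kafka and tracks its offsets in ZooKeeper.
        BrokerHosts hosts = new ZkHosts("localhost:2181");
        SpoutConfig spoutConfig = new SpoutConfig(hosts, "my-topic", "/my-topic", "storm-consumer");
        spoutConfig.scheme = new SchemeAsMultiScheme(new StringScheme());

        TopologyBuilder builder = new TopologyBuilder();
        builder.setSpout("kafka-spout", new KafkaSpout(spoutConfig), 1);
        // CassandraWriterBolt is a placeholder for your own bolt that
        // saves each incoming tuple to Cassandra.
        builder.setBolt("cassandra-bolt", new CassandraWriterBolt(), 1)
               .shuffleGrouping("kafka-spout");

        LocalCluster cluster = new LocalCluster();
        cluster.submitTopology("kafka-storm-cassandra", new Config(), builder.createTopology());
    }
}

Because the spout commits its offsets to ZooKeeper, the topology simply waits while the producer is idle and resumes from the last committed offset when new messages arrive, which is the halt-and-resume behavior you describe.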
