
I'm developing a KTable example by following this lecture: https://cognizant.udemy.com/course/kafka-streams-real-time-stream-processing-master-class/learn/lecture/14244016#questions.

Error:

>HDFCBANK:1250.00
>[2022-11-08 17:07:45,505] WARN [Producer clientId=console-producer] Got error produce response with correlation id 5 on topic-partition stock-tick-0, retrying (2 attempts left). Error: NOT_ENOUGH_REPLICAS (org.apache.kafka.clients.producer.internals.Sender)
[2022-11-08 17:07:45,609] WARN [Producer clientId=console-producer] Got error produce response with correlation id 6 on topic-partition stock-tick-0, retrying (1 attempts left). Error: NOT_ENOUGH_REPLICAS (org.apache.kafka.clients.producer.internals.Sender)
[2022-11-08 17:07:45,713] WARN [Producer clientId=console-producer] Got error produce response with correlation id 7 on topic-partition stock-tick-0, retrying (0 attempts left). Error: NOT_ENOUGH_REPLICAS (org.apache.kafka.clients.producer.internals.Sender)
[2022-11-08 17:07:45,821] ERROR Error when sending message to topic stock-tick with key: 8 bytes, value: 7 bytes with error: (org.apache.kafka.clients.producer.internals.ErrorLoggingCallback)
org.apache.kafka.common.errors.NotEnoughReplicasException: Messages are rejected since there are fewer in-sync replicas than required.
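
NOT_ENOUGH_REPLICAS is returned when the producer asks for acks=all and the partition's in-sync replica count is below the effective min.insync.replicas. To see what the brokers are actually enforcing, a minimal diagnostic sketch could look like the following (a hypothetical helper, not part of the course code; it assumes the same localhost:9092 bootstrap server and uses the Kafka AdminClient):

import org.apache.kafka.clients.admin.AdminClient;
import org.apache.kafka.clients.admin.AdminClientConfig;
import org.apache.kafka.clients.admin.Config;
import org.apache.kafka.clients.admin.TopicDescription;
import org.apache.kafka.common.config.ConfigResource;

import java.util.Collections;
import java.util.Properties;

// Hypothetical helper: prints the effective min.insync.replicas for stock-tick
// and the replica/ISR counts of its partitions.
public class ReplicaCheck {
    public static void main(String[] args) throws Exception {
        Properties props = new Properties();
        props.put(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");

        try (AdminClient admin = AdminClient.create(props)) {
            // Effective topic config (min.insync.replicas may be inherited from the broker)
            ConfigResource topicRes = new ConfigResource(ConfigResource.Type.TOPIC, "stock-tick");
            Config config = admin.describeConfigs(Collections.singleton(topicRes))
                .all().get().get(topicRes);
            System.out.println("min.insync.replicas = " + config.get("min.insync.replicas").value());

            // Replication factor and current in-sync replicas per partition
            TopicDescription desc = admin.describeTopics(Collections.singleton("stock-tick"))
                .all().get().get("stock-tick");
            desc.partitions().forEach(p ->
                System.out.println("partition " + p.partition()
                    + " replicas=" + p.replicas().size()
                    + " isr=" + p.isr().size()));
        }
    }
}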

Code:

AppConfigs.java
class AppConfigs {
    final static String applicationID = "StreamingTable";
    final static String bootstrapServers = "localhost:9092";
    final static String topicName = "stock-tick";
    final static String stateStoreLocation = "tmp/state-store";
    final static String stateStoreName = "kt01-store";
    final static String regExSymbol = "(?i)HDFCBANK|TCS";
    final static String queryServerHost = "localhost";
    final static int queryServerPort = 7010;
}

QueryServer.java

import org.apache.kafka.streams.KafkaStreams;
import org.apache.kafka.streams.KeyValue;
import org.apache.kafka.streams.state.HostInfo;
import org.apache.kafka.streams.state.QueryableStoreTypes;
import org.apache.kafka.streams.state.ReadOnlyKeyValueStore;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import spark.Spark;

import javax.ws.rs.client.Client;
import javax.ws.rs.client.ClientBuilder;
import java.util.ArrayList;
import java.util.List;

class QueryServer {
    private static final Logger logger = LogManager.getLogger();
    private final String NO_RESULTS = "No Results Found";
    private final String APPLICATION_NOT_ACTIVE = "Application is not active. Try later.";
    private final KafkaStreams streams;
    private Boolean isActive = false;
    private final HostInfo hostInfo;
    private Client client;

    QueryServer(KafkaStreams streams, String hostname, int port) {
        this.streams = streams;
        this.hostInfo = new HostInfo(hostname, port);
        client = ClientBuilder.newClient();
    }

    void setActive(Boolean state) {
        isActive = state;
    }

    private List<KeyValue<String, String>> readAllFromLocal() {

        List<KeyValue<String, String>> localResults = new ArrayList<>();
        ReadOnlyKeyValueStore<String, String> stateStore =
            streams.store(AppConfigs.stateStoreName, QueryableStoreTypes.keyValueStore());

        stateStore.all().forEachRemaining(localResults::add);
        return localResults;
    }

    void start() {
        logger.info("Starting Query Server at http://" + hostInfo.host() + ":" + hostInfo.port()
            + "/" + AppConfigs.stateStoreName + "/all");

        Spark.port(hostInfo.port());

        Spark.get("/" + AppConfigs.stateStoreName + "/all", (req, res) -> {

            List<KeyValue<String, String>> allResults;
            String results;

            if (!isActive) {
                results = APPLICATION_NOT_ACTIVE;
            } else {
                allResults = readAllFromLocal();
                results = (allResults.size() == 0) ? NO_RESULTS
                    : allResults.toString();
            }
            return results;
        });

    }

    void stop() {
        client.close();
        Spark.stop();
    }

}
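
A side note on QueryServer, unrelated to the producer error: the two-argument streams.store(name, type) overload used in readAllFromLocal() was deprecated in Kafka Streams 2.5 and removed in 3.0. If the project runs on a newer client than the course, a sketch of the equivalent lookup (as a drop-in replacement for that method, with the rest of the class unchanged) would be:

import org.apache.kafka.streams.KeyValue;
import org.apache.kafka.streams.StoreQueryParameters;
import org.apache.kafka.streams.state.QueryableStoreTypes;
import org.apache.kafka.streams.state.ReadOnlyKeyValueStore;

import java.util.ArrayList;
import java.util.List;

// Equivalent of readAllFromLocal() on Kafka Streams 2.5+
private List<KeyValue<String, String>> readAllFromLocal() {
    List<KeyValue<String, String>> localResults = new ArrayList<>();
    ReadOnlyKeyValueStore<String, String> stateStore = streams.store(
        StoreQueryParameters.fromNameAndType(
            AppConfigs.stateStoreName, QueryableStoreTypes.keyValueStore()));
    stateStore.all().forEachRemaining(localResults::add);
    return localResults;
}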

StreamingTableApp.java

import org.apache.kafka.common.serialization.Serdes;
import org.apache.kafka.streams.KafkaStreams;
import org.apache.kafka.streams.StreamsBuilder;
import org.apache.kafka.streams.StreamsConfig;
import org.apache.kafka.streams.kstream.KTable;
import org.apache.kafka.streams.kstream.Materialized;
import org.apache.kafka.streams.kstream.Printed;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;

import java.util.Properties;

public class StreamingTableApp {
    private static final Logger logger = LogManager.getLogger();

    public static void main(final String[] args) {

        final Properties props = new Properties();
        props.put(StreamsConfig.APPLICATION_ID_CONFIG, AppConfigs.applicationID);
        props.put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, AppConfigs.bootstrapServers);
        props.put(StreamsConfig.STATE_DIR_CONFIG, AppConfigs.stateStoreLocation);
        props.put(StreamsConfig.DEFAULT_KEY_SERDE_CLASS_CONFIG, Serdes.String().getClass());
        props.put(StreamsConfig.DEFAULT_VALUE_SERDE_CLASS_CONFIG, Serdes.String().getClass());

        StreamsBuilder streamsBuilder = new StreamsBuilder();
        KTable<String, String> KT0 = streamsBuilder.table(AppConfigs.topicName);

        //Print the incoming message
        KT0.toStream().print(Printed.<String, String>toSysOut().withLabel("KT0"));

        KTable<String, String> KT1 = KT0.filter((k, v) -> k.matches(AppConfigs.regExSymbol) && !v.isEmpty(),
            Materialized.as(AppConfigs.stateStoreName));
        KT1.toStream().print(Printed.<String, String>toSysOut().withLabel("KT1"));

        KafkaStreams streams = new KafkaStreams(streamsBuilder.build(), props);

        //Query Server
        QueryServer queryServer = new QueryServer(streams, AppConfigs.queryServerHost, AppConfigs.queryServerPort);
        streams.setStateListener((newState, oldState) -> {
            logger.info("State Changing to " + newState + " from " + oldState);
            queryServer.setActive(newState == KafkaStreams.State.RUNNING && oldState == KafkaStreams.State.REBALANCING);
        });

        streams.start();
        queryServer.start();

        Runtime.getRuntime().addShutdownHook(new Thread(() -> {
            logger.info("Shutting down servers");
            queryServer.stop();
            streams.close();
        }));

    }
}
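
Once the produce error is resolved and the application reaches RUNNING, the materialized store can be checked over HTTP at the URL logged by QueryServer (http://localhost:7010/kt01-store/all). A minimal client sketch (hypothetical class name, assuming Java 11+ for java.net.http):

import java.net.URI;
import java.net.http.HttpClient;
import java.net.http.HttpRequest;
import java.net.http.HttpResponse;

// Hypothetical client for the route registered in QueryServer.start()
public class QueryClient {
    public static void main(String[] args) throws Exception {
        HttpRequest request = HttpRequest.newBuilder()
            .uri(URI.create("http://localhost:7010/kt01-store/all"))
            .GET()
            .build();

        HttpResponse<String> response = HttpClient.newHttpClient()
            .send(request, HttpResponse.BodyHandlers.ofString());
        System.out.println(response.body());
    }
}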

Scripts I'm using:

$KAFKA_HOME/bin/zookeeper-server-start.sh $KAFKA_HOME/config/zookeeper.properties
$KAFKA_HOME/bin/kafka-server-start.sh $KAFKA_HOME/config/server-0.properties
$KAFKA_HOME/bin/kafka-server-start.sh $KAFKA_HOME/config/server-1.properties
$KAFKA_HOME/bin/kafka-server-start.sh $KAFKA_HOME/config/server-2.properties
$KAFKA_HOME/bin/kafka-topics.sh --create --bootstrap-server localhost:9092 --replication-factor 1 --partitions 1 --topic stock-tick
$KAFKA_HOME/bin/kafka-console-producer.sh --broker-list localhost:9092 --topic stock-tick --property parse.key=true --property key.separator=":"
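
Note that the topic is created with --replication-factor 1 even though three brokers are started. If server-0/1/2.properties set min.insync.replicas greater than 1 (an assumption, since those files are not shown), a single-replica topic can never have enough in-sync replicas for an acks=all produce. A hypothetical sketch of recreating the topic with one replica per broker:

import org.apache.kafka.clients.admin.AdminClient;
import org.apache.kafka.clients.admin.AdminClientConfig;
import org.apache.kafka.clients.admin.NewTopic;

import java.util.Collections;
import java.util.Properties;

// Hypothetical helper: recreates stock-tick with one replica per broker.
// Assumes the existing single-replica topic has already been deleted
// (e.g. with kafka-topics.sh --delete --topic stock-tick).
public class RecreateStockTickTopic {
    public static void main(String[] args) throws Exception {
        Properties props = new Properties();
        props.put(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");

        try (AdminClient admin = AdminClient.create(props)) {
            NewTopic stockTick = new NewTopic("stock-tick", 1, (short) 3); // 1 partition, 3 replicas
            admin.createTopics(Collections.singleton(stockTick)).all().get();
        }
    }
}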