
While working on some of the services in our project, we integrated Protobuf into our design. All of our services now communicate through Kafka topics, and each topic has its own Protobuf schema associated with it.

We are trying to publish events directly to each topic, but the messages are not encoded in a way that Protobuf decoding can handle.
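As far as we can tell, messages produced without the Schema Registry serializer lack the Confluent wire framing (a magic byte, the 4-byte schema ID, and, for Protobuf, message indexes), which is what Control Center needs in order to decode them. Below is a minimal sketch of what we think a correctly framed producer would look like with the confluent-kafka Python client; `person_pb2`, the topic name, and the host/port values are placeholders for our setup:

```python
from confluent_kafka import SerializingProducer
from confluent_kafka.schema_registry import SchemaRegistryClient
from confluent_kafka.schema_registry.protobuf import ProtobufSerializer
from confluent_kafka.serialization import StringSerializer

import person_pb2  # hypothetical module generated by protoc from our schema

schema_registry = SchemaRegistryClient({"url": "http://localhost:8081"})

value_serializer = ProtobufSerializer(
    person_pb2.Person,              # generated message class for this topic
    schema_registry,
    {"use.deprecated.format": False},
)

producer = SerializingProducer({
    "bootstrap.servers": "localhost:9092",
    "key.serializer": StringSerializer("utf_8"),
    "value.serializer": value_serializer,
})

# The serializer prepends the magic byte and schema ID, so consumers
# (and Control Center) can look the schema up and decode the payload.
producer.produce(topic="person-topic", key="person-1",
                 value=person_pb2.Person(name="Jane Doe"))
producer.flush()
```

By default the serializer also auto-registers the schema under the `person-topic-value` subject on the first produce.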

Is there any way to "load" schemas into the "Schemas" tab in Control Center?
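Our understanding is that the "Schemas" tab is just a view over Schema Registry, so registering each topic's schema under its `<topic>-value` subject should make it show up there. A sketch of registering one explicitly (the subject name, registry URL, and `.proto` source are assumptions for our environment):

```python
from confluent_kafka.schema_registry import Schema, SchemaRegistryClient

client = SchemaRegistryClient({"url": "http://localhost:8081"})

# Hypothetical .proto source for one of our topics.
person_proto = """
syntax = "proto3";

message Person {
  string name = 1;
}
"""

# Registering under "<topic>-value" (the default TopicNameStrategy subject)
# is what ties the schema to that topic in Control Center.
schema_id = client.register_schema(
    "person-topic-value",
    Schema(person_proto, schema_type="PROTOBUF"),
)
print(f"registered schema id: {schema_id}")
```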

Below are the Kafka services from our docker-compose.yml file:

```yaml
version: '3'

services:

  schemaregistry:
    image: confluentinc/cp-schema-registry:${CONFLUENTINC_SCHEMAREGISTRY_VERSION}
    container_name: schemaregistry
    env_file:
      - env/be.env
    profiles:
      - deploy
      - init
    depends_on:
      - zookeeper
    restart: always
    ports:
      - ${SCHEMA_REGISTRY_PORT}:${SCHEMA_REGISTRY_PORT}
    environment:
      SCHEMA_REGISTRY_KAFKASTORE_CONNECTION_URL: zookeeper:${ZOOKEEPER_PORT}
      SCHEMA_REGISTRY_LISTENERS: ${SCHEMA_REGISTRY_LISTENER_HOST}:${SCHEMA_REGISTRY_LISTENER_PORT}



  zookeeper:
    image: confluentinc/cp-zookeeper:${CONFLUENTINC_VERSION}
    hostname: zookeeper
    container_name: zookeeper
    profiles:
      - deploy
      - init
    user: root
    restart: always
    ports:
      - ${ZOOKEEPER_PORT}:${ZOOKEEPER_PORT}
    environment:
      ZOOKEEPER_CLIENT_PORT: ${ZOOKEEPER_PORT}
      ZOOKEEPER_TICK_TIME: ${ZOOKEEPER_TICK_TIME}
    volumes:
      - be-person-zookeeper-data:/var/lib/zookeeper/data:rw
      - be-person-zookeeper-logs:/var/lib/zookeeper/log
      - be-person-zookeeper-secret:/etc/zookeeper/secrets
    healthcheck:
      test: nc -z localhost ${ZOOKEEPER_PORT} || exit -1
      interval: ${HEALTHCHECK_INTERVALS}
      timeout: ${HEALTHCHECK_TIMEOUT}
      retries: ${HEALTHCHECK_RETRIES}
      start_period: 10s
    logging:
      options:
        max-size: 50m


  kafka_broker:
    image: confluentinc/cp-server:${CONFLUENTINC_VERSION}
    hostname: kafka_broker
    profiles:
      - deploy
      - init
    user: root
    container_name: kafka_broker
    depends_on:
      zookeeper:
        condition: service_healthy
    restart: always
    ports:
      - ${KAFKA_HOST_PORT}:${KAFKA_HOST_PORT}
      - ${KAFKA_JMX_PORT}:${KAFKA_JMX_PORT}
    environment:
      KAFKA_BROKER_ID: 1
      KAFKA_ZOOKEEPER_CONNECT: zookeeper:${ZOOKEEPER_PORT}
      KAFKA_LISTENER_SECURITY_PROTOCOL_MAP: PLAINTEXT:PLAINTEXT,PLAINTEXT_HOST:PLAINTEXT
      KAFKA_ADVERTISED_LISTENERS: PLAINTEXT://${KAFKA_HOSTNAME}:${KAFKA_PORT},PLAINTEXT_HOST://${KAFKA_LISTENER_IP}:${KAFKA_HOST_PORT}
      KAFKA_METRIC_REPORTERS: io.confluent.metrics.reporter.ConfluentMetricsReporter
      KAFKA_OFFSETS_TOPIC_REPLICATION_FACTOR: 1
      KAFKA_GROUP_INITIAL_REBALANCE_DELAY_MS: 0
      KAFKA_CONFLUENT_LICENSE_TOPIC_REPLICATION_FACTOR: 1
      KAFKA_CONFLUENT_BALANCER_TOPIC_REPLICATION_FACTOR: 1
      KAFKA_TRANSACTION_STATE_LOG_MIN_ISR: 1
      KAFKA_TRANSACTION_STATE_LOG_REPLICATION_FACTOR: 1
      KAFKA_JMX_PORT: ${KAFKA_JMX_PORT}
      KAFKA_JMX_HOSTNAME: localhost
      CONFLUENT_METRICS_REPORTER_BOOTSTRAP_SERVERS: ${KAFKA_HOSTNAME}:${KAFKA_PORT}
      CONFLUENT_METRICS_REPORTER_TOPIC_REPLICAS: 1
      CONFLUENT_METRICS_ENABLE: 'true'
      CONFLUENT_SUPPORT_CUSTOMER_ID: 'anonymous'
      KAFKA_SOCKET_REQUEST_MAX_BYTES: 20971520
      KAFKA_MESSAGE_MAX_BYTES: 20971520
      KAFKA_LOG_RETENTION_HOURS: 1
      KAFKA_LOG_CLEANER_DELETE_RETENTION_MS: 3600000
    volumes:
      - be-person-kafka-logs:/var/log/kafka
      - be-person-kafka-data:/var/lib/kafka/data
      - be-person-kafka-secret:/etc/kafka/secrets
    healthcheck:
      test: ["CMD-SHELL", "kafka-topics --bootstrap-server 127.0.0.1:${KAFKA_HOST_PORT} --list"]
      interval: ${HEALTHCHECK_INTERVALS}
      timeout: ${HEALTHCHECK_TIMEOUT}
      retries: ${HEALTHCHECK_RETRIES}
      start_period: 10s
    logging:
      options:
        max-size: 50m


  control-center:
    image: confluentinc/cp-enterprise-control-center:${CONFLUENTINC_VERSION}
    hostname: control-center
    profiles:
      - deploy
    container_name: control-center
    restart: always
    depends_on:
      kafka_broker:
        condition: service_healthy
    ports:
      - ${CONTROL_CENTER_PORT}:${CONTROL_CENTER_PORT}
    environment:
      CONTROL_CENTER_BOOTSTRAP_SERVERS: ${KAFKA_HOSTNAME}:${KAFKA_PORT}
      CONTROL_CENTER_REPLICATION_FACTOR: 1
      CONTROL_CENTER_INTERNAL_TOPICS_PARTITIONS: 1
      CONTROL_CENTER_MONITORING_INTERCEPTOR_TOPIC_PARTITIONS: 1
      CONFLUENT_METRICS_TOPIC_REPLICATION: 1
      CONTROL_CENTER_SCHEMA_REGISTRY_URL: ${SCHEMA_REGISTRY_LISTENER_HOST}:${SCHEMA_REGISTRY_LISTENER_PORT}
      PORT: ${CONTROL_CENTER_PORT}
    logging:
      options:
        max-size: 50m



networks:
  default:
    external: false
    name: retail_network
    driver: bridge
    ipam:
      driver: default
      config:
      - subnet: 10.104.0.0/16


volumes:

  be-person-zookeeper-data:
  be-person-zookeeper-secret:
  be-person-zookeeper-logs:

  be-person-kafka-secret:
  be-person-kafka-data:
  be-person-kafka-logs:

  be-person-consul-data:
  be-person-consul-logs:


```
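For reference, this is how we would check what Schema Registry actually holds; the URL is an assumption based on our `SCHEMA_REGISTRY_LISTENER_*` variables:

```python
from confluent_kafka.schema_registry import SchemaRegistryClient

client = SchemaRegistryClient({"url": "http://localhost:8081"})

# The subjects listed here are what Control Center's Schemas tab displays.
print(client.get_subjects())
```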
