I am trying to implement SASL/OAUTHBEARER in my C++ Kafka producer application using librdkafka v2.2.0.
From the available documentation I have put together the implementation below, but the OAuth token refresh callback is never invoked, so the token is not getting refreshed. I am attaching the code snippet — please review it and point out any problems or mistakes.
Is a Kerberos setup needed? (Kerberos is used by the GSSAPI mechanism, not OAUTHBEARER, so I would expect not.)
/**
 * Delivery report callback, registered via rd_kafka_conf_set_dr_msg_cb().
 *
 * Invoked from rd_kafka_poll() exactly once per produced message, with
 * either the delivery error or the delivered payload size and partition.
 *
 * Fix: removed the stray printf-style '%' that was left in the success
 * message ("partition %") when the code was converted from the C
 * fprintf-based example to stream-style logging.
 */
static void dr_msg_cb(rd_kafka_t *rk, const rd_kafka_message_t *rkmessage, void *opaque) {
    if (rkmessage->err) {
        // NOTE(review): LOG_FATAL here presumably only logs and does not
        // abort — confirm the macro's semantics; a single failed delivery
        // is usually not fatal for a producer.
        LOG_FATAL << "Message delivery failed: " << rd_kafka_err2str(rkmessage->err);
    } else {
        LOG_DEBUG << "Message delivered ( " << rkmessage->len << " bytes, partition " << rkmessage->partition << ")";
    }
}
/**
 * OAUTHBEARER token refresh callback, registered via
 * rd_kafka_conf_set_oauthbearer_token_refresh_cb().
 *
 * librdkafka invokes this whenever a token must be acquired or refreshed.
 * The callback MUST finish by calling exactly one of
 * rd_kafka_oauthbearer_set_token() or
 * rd_kafka_oauthbearer_set_token_failure().
 *
 * Fixes vs. the original:
 *  - The RAW access token string is handed to librdkafka. The original
 *    base64-encoded a home-grown JSON wrapper ({"accessTtoken":...,
 *    "expiresIn":...}) and set that as the token, which the broker
 *    cannot validate; set_token() expects the token value itself.
 *  - The return value of rd_kafka_oauthbearer_set_token() is checked and
 *    failures are reported via rd_kafka_oauthbearer_set_token_failure(),
 *    instead of passing a NULL/0 errstr and ignoring the result.
 */
static void oauthbearer_token_refresh_cb(rd_kafka_t* rk, const char* oauthbearer_config, void* opaque) {
    ptree tokenData = GenUtil::GetoAuthAccessToken(clientId, clientSecret, refToken, url);
    std::string accToken = tokenData.get<std::string>("access_token");
    if (accToken.empty()) {
        rd_kafka_oauthbearer_set_token_failure(rk, "Error in getting access token");
        return;
    }
    // Token lifetime is wall-clock milliseconds since the epoch.
    // assumes TimeUtil::TimeNow() returns ms since epoch — TODO confirm;
    // "expires_in" is the OAuth relative lifetime in seconds.
    int64_t tokenExpTimeMs = TimeUtil::TimeNow() + tokenData.get<unsigned long long>("expires_in") * 1000;
    char errbuf[512];
    rd_kafka_resp_err_t err = rd_kafka_oauthbearer_set_token(
            rk,
            accToken.c_str(),        // the bare token — NOT base64(json)
            tokenExpTimeMs,          // md_lifetime_ms (absolute, ms)
            "",                      // md_principal_name (none used here)
            NULL, 0,                 // no SASL extensions
            errbuf, sizeof(errbuf));
    if (err != RD_KAFKA_RESP_ERR_NO_ERROR) {
        rd_kafka_oauthbearer_set_token_failure(rk, errbuf);
    }
}
int Init() {
conf = rd_kafka_conf_new();
if (rd_kafka_conf_set(conf, "bootstrap.servers", brokers,
errstr, sizeof (errstr)) != RD_KAFKA_CONF_OK
|| rd_kafka_conf_set(conf, "security.protocol", "SASL_PLAINTEXT",
errstr, sizeof (errstr)) != RD_KAFKA_CONF_OK
|| rd_kafka_conf_set(conf, "sasl.mechanism", "OAUTHBEARER",
errstr, sizeof (errstr)) != RD_KAFKA_CONF_OK ) {
LOG_FATAL << "Error in rd_kafka_conf_set(): " << errstr;
return 1;
}
// custom call back function
rd_kafka_conf_set_dr_msg_cb(conf, dr_msg_cb);
rk = rd_kafka_new(RD_KAFKA_PRODUCER, conf, errstr, sizeof (errstr));
if (!rk) {
LOG_ERROR << "Failed to create new producer: %s\n" << errstr;
return 1;
}
//enable sasl background calls
rd_kafka_sasl_background_callbacks_enable(rk);
//topic creation
rkt = rd_kafka_topic_new(rk, topic, NULL);
if (!rkt) {
LOG_ERROR << "Failed to create topic object: %s\n" << rd_kafka_err2str(rd_kafka_last_error());
rd_kafka_destroy(rk);
return 1;
}
// token refresh callback
rd_kafka_conf_set_oauthbearer_token_refresh_cb(conf, oauthbearer_token_refresh_cb);
// enable ssl queue
rd_kafka_conf_enable_sasl_queue(conf, 1);
return 0;
}
/**
 * Produce one message to the configured topic, retrying while the
 * producer queue is full.
 * @param data message payload (copied by librdkafka).
 * @return last librdkafka error code (0 == no error).
 *
 * Fixes vs. the original:
 *  - The result of `new char[...]` was discarded (memory leak) and
 *    strcpy wrote into an unrelated pointer `b`, which was then freed
 *    with `delete` instead of `delete[]`. Because RD_KAFKA_MSG_F_COPY
 *    makes librdkafka copy the payload immediately, no manual buffer is
 *    needed at all — data.c_str() is passed directly.
 *  - `goto RETRY` replaced with a loop.
 */
int Send(std::string data) {
    for (;;) {
        if (rd_kafka_produce(
                rkt,
                RD_KAFKA_PARTITION_UA,             // let the partitioner choose
                RD_KAFKA_MSG_F_COPY,               // librdkafka copies the payload
                const_cast<char*>(data.c_str()),
                data.length(),
                NULL, 0,                           // no key
                NULL) == 0) {
            // NOTE(review): FATAL severity for a routine success message
            // looks wrong — confirm intended log level.
            LOG_FATAL << "%% Enqueued message (" << data.length() << " bytes) for topic " << rd_kafka_topic_name(rkt);
            break;
        }
        LOG_FATAL << "%% Failed to produce to topic" << rd_kafka_topic_name(rkt) << ": " << rd_kafka_err2str(rd_kafka_last_error());
        if (rd_kafka_last_error() != RD_KAFKA_RESP_ERR__QUEUE_FULL)
            break;
        // Queue full: serve delivery reports to drain it, then retry.
        rd_kafka_poll(rk, 1000/*block for max 1000ms*/);
    }
    // Serve pending delivery reports (invokes dr_msg_cb), non-blocking.
    rd_kafka_poll(rk, 0);
    int err = rd_kafka_last_error();
    LOG_FATAL << err;
    return err;
}