We are using a kafka consumer app based on the example below: The problem: we receive Error in SetErrorHandler: {"Code":"Local_Transport","Message":"sasl_ssl://XXXX.servicebus.windows.net:9093/0: Disconnected (after 3654528ms in state UP)"}
{"Code":"Local_Transport","Message":"GroupCoordinator: XXXX.servicebus.windows.net:9093: Disconnected (after 3654381ms in state UP)"}
{"Code":"Local_AllBrokersDown","Message":"5/5 brokers are down"}
public void Consume()
{
try
{
int i = 0;
int j = 0;
const int commitPeriod = 5;
var config = new ConsumerConfig
{
BootstrapServers = BootstrapServers,
GroupId = "$Default",
SecurityProtocol = SecurityProtocol.SaslSsl,
SocketTimeoutMs = 60000,
SessionTimeoutMs =10000,
StatisticsIntervalMs =9000,
SaslMechanism = SaslMechanism.OAuthBearer,
AutoOffsetReset = AutoOffsetReset.Earliest,
SocketKeepaliveEnable = true,
EnableAutoCommit = true,
EnablePartitionEof = true,
PartitionAssignmentStrategy = PartitionAssignmentStrategy.CooperativeSticky
};
using (var consumer = new ConsumerBuilder<string, string>(config)
// Note: All handlers are called on the main .Consume thread.
.SetOAuthBearerTokenRefreshHandler(OAuthBearerTokenRefreshHandler)
.SetErrorHandler((_, e) => Console.WriteLine($"Error: {e.Reason}"))
.SetPartitionsAssignedHandler((c, partitions) =>
{
// Since a cooperative assignor (CooperativeSticky) has been configured, the
// partition assignment is incremental (adds partitions to any existing assignment).
Console.WriteLine(
"Partitions incrementally assigned: [" +
string.Join(',', partitions.Select(p => p.Partition.Value)) +
"], all: [" +
string.Join(',', c.Assignment.Concat(partitions).Select(p => p.Partition.Value)) +
"]");
// Possibly manually specify start offsets by returning a list of topic/partition/offsets
// to assign to, e.g.:
// return partitions.Select(tp => new TopicPartitionOffset(tp, externalOffsets[tp]));
})
.SetPartitionsRevokedHandler((c, partitions) =>
{
// Since a cooperative assignor (CooperativeSticky) has been configured, the revoked
// assignment is incremental (may remove only some partitions of the current assignment).
var remaining = c.Assignment.Where(atp => partitions.Where(rtp => rtp.TopicPartition == atp).Count() == 0);
Console.WriteLine(
"Partitions incrementally revoked: [" +
string.Join(',', partitions.Select(p => p.Partition.Value)) +
"], remaining: [" +
string.Join(',', remaining.Select(p => p.Partition.Value)) +
"]");
})
.SetPartitionsLostHandler((c, partitions) =>
{
// The lost partitions handler is called when the consumer detects that it has lost ownership
// of its assignment (fallen out of the group).
Console.WriteLine($"Partitions were lost: [{string.Join(", ", partitions)}]");
})
.Build())
{
String[] TopicsList = consumerCriteria.workeroptions.Kafka.Topics.ToString().Split(',');
consumer.Subscribe(TopicsList);
try
{
while (true)
{
try
{
var consumeResult = consumer.Consume(cts.Token);
if (consumeResult.IsPartitionEOF)
{
continue;
}
processMessage(consumeResult.Message.Value, eventFileName, consumeResult.Topic);
if (consumeResult.Offset % commitPeriod == 0)
{
// The Commit method sends a "commit offsets" request to the Kafka
// cluster and synchronously waits for the response. This is very
// slow compared to the rate at which the consumer is capable of
// consuming messages. A high performance application will typically
// commit offsets relatively infrequently and be designed handle
// duplicate messages in the event of failure.
try
{
consumer.Commit(consumeResult);
consumer.StoreOffset(consumeResult);
}
catch (KafkaException e)
{
Console.WriteLine($"Commit error: {e.Error.Reason}");
}
finally
{
consumer.Close();
}
}
}
catch (ConsumeException e)
{
Console.WriteLine($"Consume error: {e.Error.Reason}");
}
}
}
catch (OperationCanceledException e)
{
Console.WriteLine("Closing consumer.");
consumer.Close();
}
}
}
catch (Exception e)
{
Console.WriteLine("Exception consumer.");
}
}