Skip to content

[KIP-848] Added online upgrade and downgrade test #2012

New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Open
wants to merge 1 commit into
base: master
Choose a base branch
from
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
25 changes: 14 additions & 11 deletions tests/common/__init__.py
Original file line number Diff line number Diff line change
Expand Up @@ -55,22 +55,25 @@

@staticmethod
def update_conf_group_protocol(conf=None):
    """
    Set group.protocol to 'consumer' in *conf* when the configuration is
    eligible for the KIP-848 consumer protocol (see
    can_upgrade_group_protocol_to_consumer). Mutates *conf* in place.

    :param dict conf: client configuration, may be None.
    """
    if TestUtils.can_upgrade_group_protocol_to_consumer(conf):
        conf['group.protocol'] = 'consumer'

Check failure on line 59 in tests/common/__init__.py

View check run for this annotation

SonarQube-Confluent / confluent-kafka-python Sonarqube Results

tests/common/__init__.py#L59

Define a constant instead of duplicating this literal 'group.protocol' 3 times.

@staticmethod
def can_upgrade_group_protocol_to_consumer(conf):
    """
    Return True when *conf* may be switched to the 'consumer' group
    protocol: it names a group, does not already pin a protocol, and the
    test run has the consumer protocol enabled.
    """
    if conf is None or 'group.id' not in conf:
        return False
    if 'group.protocol' in conf:
        return False
    return TestUtils.use_group_protocol_consumer()

@staticmethod
def remove_forbidden_conf_group_protocol_consumer(conf):
    """
    Strip properties that librdkafka forbids for the `CONSUMER` (KIP-848)
    group protocol from *conf*, in place.

    :param dict conf: client configuration, may be None.
    """
    # Nothing to scrub when there is no conf, the consumer protocol is not
    # in use for this run, or the conf explicitly selects another protocol.
    if conf is None or not TestUtils.use_group_protocol_consumer() or conf.get('group.protocol', 'consumer') != 'consumer':
        return
    forbidden_conf_properties = ["session.timeout.ms",
                                 "partition.assignment.strategy",
                                 "heartbeat.interval.ms",
                                 "group.protocol.type"]
    for prop in forbidden_conf_properties:
        if prop in conf:
            # f-string (the pre-diff version printed the literal '{prop}').
            print(f"Skipping setting forbidden configuration {prop} for `CONSUMER` protocol")
            del conf[prop]


class TestConsumer(Consumer):
Expand Down
15 changes: 14 additions & 1 deletion tests/integration/cluster_fixture.py
Original file line number Diff line number Diff line change
Expand Up @@ -233,6 +233,19 @@ def create_topic(self, prefix, conf=None, **create_topic_kwargs):

future_topic.get(name).result()
return name

def delete_topic(self, topic):
    """
    Deletes a topic with this cluster.

    :param str topic: topic name
    """
    futures = self.admin().delete_topics([topic])
    try:
        futures.get(topic).result()
    except Exception as e:
        print("Failed to delete topic {}: {}".format(topic, e))
    else:
        print("Topic {} deleted".format(topic))

def create_topic_and_wait_propogation(self, prefix, conf=None, **create_topic_kwargs):
"""
Expand Down Expand Up @@ -273,7 +286,7 @@ def seed_topic(self, topic, value_source=None, key_source=None, header_source=No
value_source = ['test-data{}'.format(i) for i in range(0, 100)]

if key_source is None:
key_source = [None]
key_source = ['test-key{}'.format(i) for i in range(0, 100)]

KafkaClusterFixture._produce(self._producer, topic, value_source, key_source, header_source)

Expand Down
131 changes: 131 additions & 0 deletions tests/integration/consumer/test_consumer_upgrade_downgrade.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,131 @@
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2025 Confluent Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
Copy link
Preview

Copilot AI Jul 17, 2025

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

The comment appears to be truncated. It should likely read '# limitations under the License.' to complete the Apache License header.

Suggested change
# limit
# limitations under the License.

Copilot uses AI. Check for mistakes.


import pytest
from confluent_kafka import ConsumerGroupType, IsolationLevel, KafkaException, TopicPartition
from confluent_kafka.admin import OffsetSpec
from tests.common import TestUtils

# Prefix for the per-run test topic; the topic name doubles as the group.id.
topic_prefix = "test_consumer_upgrade_downgrade_"
# Partitions per test topic; assignment checks assert an even split of these.
number_of_partitions = 10


def get_group_protocol_type(a, group_id):
    """
    Return the group type reported by the broker for *group_id*,
    or None if describing the group fails.

    :param a: AdminClient instance.
    :param str group_id: consumer group id to describe.
    """
    # snake_case rename (was `futureMap`); the redundant
    # `except Exception: raise` clause is dropped — unhandled exceptions
    # propagate by default.
    future_map = a.describe_consumer_groups([group_id])
    try:
        return future_map[group_id].result().type
    except KafkaException as e:
        print("Error while describing group id '{}': {}".format(group_id, e))

def list_offsets(a, topic, no_of_partitions):
    """
    Query and print the latest committed offset of every partition of
    *topic* using the admin client *a*.
    """
    topic_partition_offsets = {
        TopicPartition(topic, p): OffsetSpec.latest() for p in range(no_of_partitions)
    }
    futmap = a.list_offsets(topic_partition_offsets,
                            isolation_level=IsolationLevel.READ_COMMITTED,
                            request_timeout=30)
    for partition, fut in futmap.items():
        try:
            result = fut.result()
            print("Topicname : {} Partition_Index : {} Offset : {} Timestamp : {}"
                  .format(partition.topic, partition.partition, result.offset,
                          result.timestamp))
        except KafkaException as e:
            print("Topicname : {} Partition_Index : {} Error : {}"
                  .format(partition.topic, partition.partition, e))


# def produce_messages(producer, topic, partitions, num_messages):

Check warning on line 56 in tests/integration/consumer/test_consumer_upgrade_downgrade.py

View check run for this annotation

SonarQube-Confluent / confluent-kafka-python Sonarqube Results

tests/integration/consumer/test_consumer_upgrade_downgrade.py#L56

Remove this commented out code.
# for i in range(num_messages):
# key = "key-{}".format(i)
# value = "value-{}".format(i)
# partition = i % partitions
# producer.produce(topic, key=key, value=value, partition=partition)
# producer.flush()
Comment on lines +56 to +62
Copy link
Preview

Copilot AI Jul 17, 2025

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

This commented-out function should be removed as it's not being used and adds clutter to the codebase.

Suggested change
# def produce_messages(producer, topic, partitions, num_messages):
# for i in range(num_messages):
# key = "key-{}".format(i)
# value = "value-{}".format(i)
# partition = i % partitions
# producer.produce(topic, key=key, value=value, partition=partition)
# producer.flush()
# Removed the commented-out `produce_messages` function to reduce clutter.

Copilot uses AI. Check for mistakes.



def check_consumer(kafka_cluster, consumers, admin_client, topic, expected_protocol):
    """
    Poll *consumers* until the group rebalance completes with an even
    partition split, assert the broker reports *expected_protocol* for the
    group, then seed the topic and verify all 100 messages are consumed.

    :param kafka_cluster: cluster fixture used to seed the topic.
    :param list consumers: consumers subscribed to *topic*; the last one is
        the most recently joined.
    :param admin_client: AdminClient for group/offset queries.
    :param str topic: topic name (also used as the group id).
    :param expected_protocol: expected ConsumerGroupType.
    """
    expected_assignment = number_of_partitions // len(consumers)

    # Poll everyone until the most recently joined consumer has received its
    # share of the partitions, i.e. the rebalance has settled.
    while len(consumers[-1].assignment()) != expected_assignment:
        for consumer in consumers:
            consumer.poll(0.1)

    for consumer in consumers:
        assert len(consumer.assignment()) == expected_assignment

    assert get_group_protocol_type(admin_client, topic) == expected_protocol

    # Produce some messages to the topic (seed_topic produces 100 by default).
    kafka_cluster.seed_topic(topic)
    list_offsets(admin_client, topic, number_of_partitions)

    total_msg_read = 0
    while total_msg_read < 100:
        for consumer in consumers:
            msg = consumer.poll(0.1)
            # Fix: poll() may return an event carrying an error; only count
            # real messages toward the expected 100.
            if msg is not None and msg.error() is None:
                total_msg_read += 1

    assert total_msg_read == 100, "Expected to read 100 messages, but read {}".format(total_msg_read)


def perform_consumer_upgrade_downgrade_test_with_partition_assignment_strategy(kafka_cluster, partition_assignment_strategy):
    """
    Exercise an online upgrade (classic -> consumer group protocol) and
    downgrade (consumer -> classic) for the given classic
    partition.assignment.strategy.
    """
    # NOTE(review): 'propogation' is a typo in the fixture API name; keep in
    # sync with tests/integration/cluster_fixture.py.
    topic = kafka_cluster.create_topic_and_wait_propogation(topic_prefix,
                                                            {
                                                                "num_partitions": number_of_partitions
                                                            })
    admin_client = kafka_cluster.admin()

    # Start with a consumer on the CLASSIC group protocol.
    consumer_conf = {'group.id': topic,
                     'auto.offset.reset': 'earliest',
                     'group.protocol': 'classic'}
    consumer_conf['partition.assignment.strategy'] = partition_assignment_strategy
    consumer = kafka_cluster.consumer(consumer_conf)
    assert consumer is not None
    consumer.subscribe([topic])
    check_consumer(kafka_cluster, [consumer], admin_client, topic, ConsumerGroupType.CLASSIC)
    # partition.assignment.strategy is forbidden under the CONSUMER protocol.
    del consumer_conf['partition.assignment.strategy']

    # Simulate an upgrade by adding a second consumer using the 'consumer'
    # protocol; the group should switch to CONSUMER.
    consumer_conf['group.protocol'] = 'consumer'
    consumer2 = kafka_cluster.consumer(consumer_conf)
    assert consumer2 is not None
    consumer2.subscribe([topic])
    check_consumer(kafka_cluster, [consumer, consumer2], admin_client, topic, ConsumerGroupType.CONSUMER)

    # Simulate a downgrade by closing the second consumer, leaving only the
    # 'classic' consumer in the group.
    consumer2.close()
    check_consumer(kafka_cluster, [consumer], admin_client, topic, ConsumerGroupType.CLASSIC)

    consumer.close()
    kafka_cluster.delete_topic(topic)


@pytest.mark.skipif(not TestUtils.use_group_protocol_consumer(),
                    reason="Skipping test as group protocol consumer is not enabled")
def test_consumer_upgrade_downgrade(kafka_cluster):
    # Run the upgrade/downgrade scenario once per classic assignment strategy.
    for strategy in ('roundrobin', 'range', 'cooperative-sticky'):
        perform_consumer_upgrade_downgrade_test_with_partition_assignment_strategy(kafka_cluster, strategy)
pFad - Phonifier reborn

Pfad - The Proxy pFad of © 2024 Garber Painting. All rights reserved.

Note: This service is not intended for secure transactions such as banking, social media, email, or purchasing. Use at your own risk. We assume no liability whatsoever for broken pages.


Alternative Proxies:

Alternative Proxy

pFad Proxy

pFad v3 Proxy

pFad v4 Proxy