-
Notifications
You must be signed in to change notification settings - Fork 919
[KIP-848] Added online upgrade and downgrade test #2012
New issue
Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.
By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.
Already on GitHub? Sign in to your account
base: master
Are you sure you want to change the base?
Changes from all commits
File filter
Filter by extension
Conversations
Jump to
Diff view
Diff view
There are no files selected for viewing
Original file line number | Diff line number | Diff line change | ||||||||||||||||
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
@@ -0,0 +1,131 @@ | ||||||||||||||||||
#!/usr/bin/env python | ||||||||||||||||||
# -*- coding: utf-8 -*- | ||||||||||||||||||
# | ||||||||||||||||||
# Copyright 2025 Confluent Inc. | ||||||||||||||||||
# | ||||||||||||||||||
# Licensed under the Apache License, Version 2.0 (the "License"); | ||||||||||||||||||
# you may not use this file except in compliance with the License. | ||||||||||||||||||
# You may obtain a copy of the License at | ||||||||||||||||||
# | ||||||||||||||||||
# http://www.apache.org/licenses/LICENSE-2.0 | ||||||||||||||||||
# | ||||||||||||||||||
# Unless required by applicable law or agreed to in writing, software | ||||||||||||||||||
# distributed under the License is distributed on an "AS IS" BASIS, | ||||||||||||||||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | ||||||||||||||||||
# See the License for the specific language governing permissions and | ||||||||||||||||||
# limitations under the License.
|
||||||||||||||||||
import pytest | ||||||||||||||||||
from confluent_kafka import ConsumerGroupType, IsolationLevel, KafkaException, TopicPartition | ||||||||||||||||||
from confluent_kafka.admin import OffsetSpec | ||||||||||||||||||
from tests.common import TestUtils | ||||||||||||||||||
|
||||||||||||||||||
topic_prefix = "test_consumer_upgrade_downgrade_" | ||||||||||||||||||
number_of_partitions = 10 | ||||||||||||||||||
|
||||||||||||||||||
|
||||||||||||||||||
def get_group_protocol_type(a, group_id):
    """Return the protocol type of *group_id* (e.g. CLASSIC/CONSUMER), or None on Kafka error.

    a: AdminClient-like object exposing describe_consumer_groups().
    group_id: the consumer group to describe (the tests use the topic name as group id).
    """
    future_map = a.describe_consumer_groups([group_id])
    try:
        group_description = future_map[group_id].result()
        return group_description.type
    except KafkaException as e:
        # Best-effort: log and return None so a caller's equality assert fails
        # with a clear mismatch instead of an exception here. The original
        # `except Exception: raise` clause was a no-op re-raise and is removed.
        print("Error while describing group id '{}': {}".format(group_id, e))
        return None
|
||||||||||||||||||
|
||||||||||||||||||
|
||||||||||||||||||
def list_offsets(a, topic, no_of_partitions):
    """Query and print the latest offset of every partition of *topic*.

    a: AdminClient-like object exposing list_offsets().
    Per-partition Kafka errors are printed, never raised.
    """
    # Ask for the latest offset of each partition in a single request.
    requests = {
        TopicPartition(topic, p): OffsetSpec.latest()
        for p in range(no_of_partitions)
    }
    futures = a.list_offsets(requests,
                             isolation_level=IsolationLevel.READ_COMMITTED,
                             request_timeout=30)
    for tp, fut in futures.items():
        try:
            res = fut.result()
            print("Topicname : {} Partition_Index : {} Offset : {} Timestamp : {}"
                  .format(tp.topic, tp.partition, res.offset, res.timestamp))
        except KafkaException as e:
            print("Topicname : {} Partition_Index : {} Error : {}"
                  .format(tp.topic, tp.partition, e))
|
||||||||||||||||||
|
||||||||||||||||||
# def produce_messages(producer, topic, partitions, num_messages): | ||||||||||||||||||
# for i in range(num_messages): | ||||||||||||||||||
# key = "key-{}".format(i) | ||||||||||||||||||
# value = "value-{}".format(i) | ||||||||||||||||||
# partition = i % partitions | ||||||||||||||||||
# producer.produce(topic, key=key, value=value, partition=partition) | ||||||||||||||||||
# producer.flush() | ||||||||||||||||||
Comment on lines
+56
to
+62
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. This commented-out function should be removed as it's not being used and adds clutter to the codebase.
Suggested change
Copilot uses AI. Check for mistakes. Positive FeedbackNegative Feedback |
||||||||||||||||||
|
||||||||||||||||||
|
||||||||||||||||||
def check_consumer(kafka_cluster, consumers, admin_client, topic, expected_protocol):
    """Verify group state after a membership change, then consume seeded messages.

    Waits (by polling) until the newest member of *consumers* owns an even
    share of the partitions, asserts every member owns that share, checks the
    group's protocol type, then seeds the topic and asserts all 100 messages
    are read across the group. *topic* doubles as the group id.

    NOTE(review): both polling loops spin without a deadline — if the
    rebalance or the seed never completes, this blocks until the test
    harness times out.
    """
    expected_share = number_of_partitions // len(consumers)

    # Keep every member polling until the most recently added consumer has
    # been assigned its share, i.e. the rebalance has settled.
    while len(consumers[-1].assignment()) != expected_share:
        for member in consumers:
            member.poll(0.1)

    for member in consumers:
        assert len(member.assignment()) == expected_share

    assert get_group_protocol_type(admin_client, topic) == expected_protocol

    # Produce some messages to the topic and show the resulting offsets.
    kafka_cluster.seed_topic(topic)
    list_offsets(admin_client, topic, number_of_partitions)

    messages_read = 0
    while messages_read < 100:
        for member in consumers:
            if member.poll(0.1) is not None:
                messages_read += 1

    assert messages_read == 100, "Expected to read 100 messages, but read {}".format(messages_read)
|
||||||||||||||||||
|
||||||||||||||||||
def perform_consumer_upgrade_downgrade_test_with_partition_assignment_strategy(kafka_cluster, partition_assignment_strategy):
    """
    Test consumer upgrade and downgrade.

    Runs one classic-protocol consumer, upgrades the group online by adding a
    'consumer'-protocol member, then downgrades it again by closing that member.
    """
    # NOTE(review): 'propogation' is a typo for 'propagation' in the cluster
    # fixture's method name; it can only be fixed at its definition site.
    topic = kafka_cluster.create_topic_and_wait_propogation(topic_prefix,
                                                            {
                                                                "num_partitions": number_of_partitions
                                                            })
    admin_client = kafka_cluster.admin()

    # Start the group with a single 'classic' protocol consumer that uses the
    # requested client-side partition assignment strategy.
    conf = {'group.id': topic,
            'auto.offset.reset': 'earliest',
            'group.protocol': 'classic',
            'partition.assignment.strategy': partition_assignment_strategy}
    classic_consumer = kafka_cluster.consumer(conf)
    assert classic_consumer is not None
    classic_consumer.subscribe([topic])
    check_consumer(kafka_cluster, [classic_consumer], admin_client, topic, ConsumerGroupType.CLASSIC)

    # Simulate an online upgrade: add a member speaking the new 'consumer'
    # protocol (which takes no client-side assignment strategy).
    del conf['partition.assignment.strategy']
    conf['group.protocol'] = 'consumer'
    upgraded_consumer = kafka_cluster.consumer(conf)
    assert upgraded_consumer is not None
    upgraded_consumer.subscribe([topic])
    check_consumer(kafka_cluster, [classic_consumer, upgraded_consumer], admin_client, topic, ConsumerGroupType.CONSUMER)

    # Simulate a downgrade: close the 'consumer' member so only the 'classic'
    # member remains and the group reverts to the classic protocol.
    upgraded_consumer.close()
    check_consumer(kafka_cluster, [classic_consumer], admin_client, topic, ConsumerGroupType.CLASSIC)

    classic_consumer.close()
    kafka_cluster.delete_topic(topic)
|
||||||||||||||||||
|
||||||||||||||||||
@pytest.mark.skipif(not TestUtils.use_group_protocol_consumer(),
                    reason="Skipping test as group protocol consumer is not enabled")
def test_consumer_upgrade_downgrade(kafka_cluster):
    """Run the online upgrade/downgrade scenario under each classic assignment strategy."""
    for strategy in ('roundrobin', 'range', 'cooperative-sticky'):
        perform_consumer_upgrade_downgrade_test_with_partition_assignment_strategy(kafka_cluster, strategy)
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
The comment appears to be truncated. It should likely read '# limitations under the License.' to complete the Apache License header.
Copilot uses AI. Check for mistakes.