|
29 | 29 | import json
|
30 | 30 | import gc
|
31 | 31 | import struct
|
32 |
| -from copy import copy |
33 | 32 |
|
34 | 33 | try:
|
35 | 34 | # Memory tracker
|
@@ -1162,101 +1161,67 @@ def verify_config(expconfig, configs):
|
1162 | 1161 |
|
1163 | 1162 |
|
def verify_avro_explicit_read_schema():
    """ Verify that reading Avro with an explicit reader schema works.

    Produces records written with the user_v1 writer schema, then consumes
    them back with user_v2 as the explicit reader schema and checks that
    Avro schema projection supplies the two fields absent from the writer
    schema ('favorite_number', 'favorite_color') from the reader schema's
    defaults.

    Requires ``bootstrap_servers`` and ``schema_registry_url`` module
    globals to point at a live cluster; raises SerializerError if schema
    projection fails.
    """
    from confluent_kafka import avro

    base_conf = {'bootstrap.servers': bootstrap_servers,
                 'error_cb': error_cb,
                 'schema.registry.url': schema_registry_url}

    # base_conf already carries schema.registry.url; only add the
    # consumer-specific settings here.
    consumer_conf = dict(base_conf, **{
        'group.id': 'test.py',
        'session.timeout.ms': 6000,
        'enable.auto.commit': False,
        'on_commit': print_commit_result,
        'auto.offset.reset': 'earliest'})

    avsc_dir = os.path.join(os.path.dirname(__file__), os.pardir, 'avro')
    writer_schema = avro.load(os.path.join(avsc_dir, "user_v1.avsc"))
    reader_schema = avro.load(os.path.join(avsc_dir, "user_v2.avsc"))

    user_value1 = {
        "name": " Rogers Nelson"
    }

    user_value2 = {
        "name": "Kenny Loggins"
    }

    combinations = [
        dict(key=user_value1, key_schema=writer_schema, value=user_value2, value_schema=writer_schema),
        dict(key=user_value2, key_schema=writer_schema, value=user_value1, value_schema=writer_schema)
    ]
    # Unique topic per run so data from earlier runs cannot interfere.
    avro_topic = topic + str(uuid.uuid4())

    p = avro.AvroProducer(base_conf)
    for combo in combinations:
        p.produce(topic=avro_topic, **combo)
    # One flush after all produces is sufficient to guarantee delivery.
    p.flush()

    c = avro.AvroConsumer(consumer_conf,
                          reader_key_schema=reader_schema,
                          reader_value_schema=reader_schema)
    c.subscribe([avro_topic])

    msgcount = 0
    try:
        while msgcount < len(combinations):
            msg = c.poll(1)

            if msg is None:
                continue
            if msg.error():
                print("Consumer error {}".format(msg.error()))
                continue

            msgcount += 1
            # Avro schema projection should return the two fields not present in the writer schema
            try:
                assert(msg.key().get('favorite_number') == 42)
                assert(msg.key().get('favorite_color') == "purple")
                assert(msg.value().get('favorite_number') == 42)
                assert(msg.value().get('favorite_color') == "purple")
                print("success: schema projection worked for explicit reader schema")
            except KeyError:
                raise confluent_kafka.avro.SerializerError("Schema projection failed when setting reader schema.")
    finally:
        # Always close the consumer so its group membership and committed
        # offsets are cleaned up, even if an assertion fails.
        c.close()
|
1261 | 1226 |
|
1262 | 1227 | default_modes = ['consumer', 'producer', 'avro', 'performance', 'admin']
|
|
0 commit comments