#!/usr/bin/env python
#
# Copyright 2018 Confluent Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.


# This is a simple example demonstrating how to produce a message to
# Confluent Cloud and then read it back again.
#
# https://www.confluent.io/confluent-cloud/
#
# Auto-creation of topics is disabled in Confluent Cloud. You will need to
# use the ccloud CLI to create the python-test-topic topic before running this
# example.
#
# $ ccloud topic create python-test-topic
#
# The <ccloud bootstrap servers>, <ccloud key> and <ccloud secret> parameters
# are available via the Confluent Cloud web interface. For more information,
# refer to the quick-start:
#
# https://docs.confluent.io/current/cloud-quickstart.html
#
# To execute using Python 2.7:
# $ virtualenv ccloud_example
# $ source ccloud_example/bin/activate
# $ pip install confluent_kafka
# $ python confluent_cloud.py
# $ deactivate
#
# To execute using Python 3.x:
# $ python -m venv ccloud_example
# $ source ccloud_example/bin/activate
# $ pip install confluent_kafka
# $ python confluent_cloud.py
# $ deactivate

import uuid
from confluent_kafka import Producer, Consumer, KafkaError

p = Producer({
    'bootstrap.servers': '<ccloud bootstrap servers>',
    'broker.version.fallback': '0.10.0.0',
    'api.version.fallback.ms': 0,
    'sasl.mechanisms': 'PLAIN',
    'security.protocol': 'SASL_SSL',
    'sasl.username': '<ccloud key>',
    'sasl.password': '<ccloud secret>'
})


def acked(err, msg):
    """Delivery report callback called (from flush()) on successful or failed delivery of the message."""
    if err is not None:
        print("failed to deliver message: {0}".format(err.str()))
    else:
        print("produced to: {0} [{1}] @ {2}".format(msg.topic(), msg.partition(), msg.offset()))


p.produce('python-test-topic', value='python test value', callback=acked)

# flush() is typically called when the producer is done sending messages to wait
# for outstanding messages to be transmitted to the broker and delivery report
# callbacks to get called. For continuous producing you should call p.poll(0)
# after each produce() call to trigger delivery report callbacks.
p.flush(10)
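
# A short sketch of the continuous-producing pattern described above: serve
# delivery report callbacks with poll(0) after each produce() and flush() only
# once at the end. The loop count and message values below are illustrative
# additions, not part of the original example.
for i in range(3):
    p.produce('python-test-topic', value='python test value {0}'.format(i), callback=acked)
    p.poll(0)  # serve delivery report callbacks from earlier produce() calls
p.flush(10)  # wait for any outstanding deliveries before continuing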

c = Consumer({
    'bootstrap.servers': '<ccloud bootstrap servers>',
    'broker.version.fallback': '0.10.0.0',
    'api.version.fallback.ms': 0,
    'sasl.mechanisms': 'PLAIN',
    'security.protocol': 'SASL_SSL',
    'sasl.username': '<ccloud key>',
    'sasl.password': '<ccloud secret>',
    'group.id': str(uuid.uuid1()),  # this will create a new consumer group on each invocation.
    'default.topic.config': {'auto.offset.reset': 'smallest'}
})

c.subscribe(['python-test-topic'])

try:
    while True:
        msg = c.poll(0.1)  # Wait for message or event/error
        if msg is None:
            # No message available within timeout.
            # Initial message consumption may take up to `session.timeout.ms` for
            # the group to rebalance and start consuming.
            continue
        elif not msg.error():
            print('consumed: {0}'.format(msg.value()))
        elif msg.error().code() == KafkaError._PARTITION_EOF:
            print('end of partition: {0} [{1}] @ {2}'.format(msg.topic(), msg.partition(), msg.offset()))
        else:
            print('error: {0}'.format(msg.error().str()))

except KeyboardInterrupt:
    pass

finally:
    # Leave group and commit final offsets
    c.close()