From 64da8ed9be23f700911d5077282de77d7b120bde Mon Sep 17 00:00:00 2001 From: yijxie Date: Thu, 22 Aug 2019 17:54:12 -0700 Subject: [PATCH 01/51] Small changes from code review --- .../azure-eventhubs/azure/eventhub/aio/client_async.py | 9 ++++----- sdk/eventhub/azure-eventhubs/azure/eventhub/client.py | 7 +++---- .../azure-eventhubs/azure/eventhub/client_abstract.py | 3 +-- 3 files changed, 8 insertions(+), 11 deletions(-) diff --git a/sdk/eventhub/azure-eventhubs/azure/eventhub/aio/client_async.py b/sdk/eventhub/azure-eventhubs/azure/eventhub/aio/client_async.py index 4ad5a99f538c..a9fdb32381ef 100644 --- a/sdk/eventhub/azure-eventhubs/azure/eventhub/aio/client_async.py +++ b/sdk/eventhub/azure-eventhubs/azure/eventhub/aio/client_async.py @@ -221,12 +221,11 @@ def create_consumer(self, consumer_group: str, partition_id: str, event_position :caption: Add an async consumer to the client for a particular consumer group and partition. """ - owner_level = kwargs.get("owner_level", None) - operation = kwargs.get("operation", None) - prefetch = kwargs.get("prefetch", None) - loop = kwargs.get("loop", None) + owner_level = kwargs.get("owner_level") + operation = kwargs.get("operation") + prefetch = kwargs.get("prefetch") or self.config.prefetch + loop = kwargs.get("loop") - prefetch = prefetch or self.config.prefetch path = self.address.path + operation if operation else self.address.path source_url = "amqps://{}{}/ConsumerGroups/{}/Partitions/{}".format( self.address.hostname, path, consumer_group, partition_id) diff --git a/sdk/eventhub/azure-eventhubs/azure/eventhub/client.py b/sdk/eventhub/azure-eventhubs/azure/eventhub/client.py index 1656ad27d074..4513cf900917 100644 --- a/sdk/eventhub/azure-eventhubs/azure/eventhub/client.py +++ b/sdk/eventhub/azure-eventhubs/azure/eventhub/client.py @@ -228,11 +228,10 @@ def create_consumer(self, consumer_group, partition_id, event_position, **kwargs :caption: Add a consumer to the client for a particular consumer group and 
partition. """ - owner_level = kwargs.get("owner_level", None) - operation = kwargs.get("operation", None) - prefetch = kwargs.get("prefetch", None) + owner_level = kwargs.get("owner_level") + operation = kwargs.get("operation") + prefetch = kwargs.get("prefetch") or self.config.prefetch - prefetch = prefetch or self.config.prefetch path = self.address.path + operation if operation else self.address.path source_url = "amqps://{}{}/ConsumerGroups/{}/Partitions/{}".format( self.address.hostname, path, consumer_group, partition_id) diff --git a/sdk/eventhub/azure-eventhubs/azure/eventhub/client_abstract.py b/sdk/eventhub/azure-eventhubs/azure/eventhub/client_abstract.py index dd62e3e76de1..c26969bb5094 100644 --- a/sdk/eventhub/azure-eventhubs/azure/eventhub/client_abstract.py +++ b/sdk/eventhub/azure-eventhubs/azure/eventhub/client_abstract.py @@ -266,7 +266,7 @@ def from_connection_string(cls, conn_str, **kwargs): :caption: Create an EventHubClient from a connection string. """ - event_hub_path = kwargs.get("event_hub_path", None) + event_hub_path = kwargs.pop("event_hub_path") is_iot_conn_str = conn_str.lstrip().lower().startswith("hostname") if not is_iot_conn_str: address, policy, key, entity = _parse_conn_str(conn_str) @@ -276,7 +276,6 @@ def from_connection_string(cls, conn_str, **kwargs): host = address[left_slash_pos + 2:] else: host = address - kwargs.pop("event_hub_path", None) return cls(host, entity, EventHubSharedKeyCredential(policy, key), **kwargs) else: return cls._from_iothub_connection_string(conn_str, **kwargs) From d951dcf99fa7445452b6cd3a0937ee7072f370ed Mon Sep 17 00:00:00 2001 From: yijxie Date: Mon, 26 Aug 2019 09:49:59 -0700 Subject: [PATCH 02/51] change EventData.msg_properties to private attribute --- .../azure-eventhubs/azure/eventhub/common.py | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/sdk/eventhub/azure-eventhubs/azure/eventhub/common.py b/sdk/eventhub/azure-eventhubs/azure/eventhub/common.py index 
9df6aa5d1c6a..ea609aa213bd 100644 --- a/sdk/eventhub/azure-eventhubs/azure/eventhub/common.py +++ b/sdk/eventhub/azure-eventhubs/azure/eventhub/common.py @@ -54,6 +54,7 @@ class EventData(object): PROP_SEQ_NUMBER = b"x-opt-sequence-number" PROP_OFFSET = b"x-opt-offset" PROP_PARTITION_KEY = b"x-opt-partition-key" + PROP_PARTITION_KEY_AMQP_SYMBOL = types.AMQPSymbol(PROP_PARTITION_KEY) PROP_TIMESTAMP = b"x-opt-enqueued-time" PROP_DEVICE_ID = b"iothub-connection-device-id" @@ -67,20 +68,19 @@ def __init__(self, body=None, to_device=None): :type to_device: str """ - self._partition_key = types.AMQPSymbol(EventData.PROP_PARTITION_KEY) self._annotations = {} self._app_properties = {} - self.msg_properties = MessageProperties() + self._msg_properties = MessageProperties() if to_device: - self.msg_properties.to = '/devices/{}/messages/devicebound'.format(to_device) + self._msg_properties.to = '/devices/{}/messages/devicebound'.format(to_device) if body and isinstance(body, list): - self.message = Message(body[0], properties=self.msg_properties) + self.message = Message(body[0], properties=self._msg_properties) for more in body[1:]: self.message._body.append(more) # pylint: disable=protected-access elif body is None: raise ValueError("EventData cannot be None.") else: - self.message = Message(body, properties=self.msg_properties) + self.message = Message(body, properties=self._msg_properties) def __str__(self): dic = { @@ -108,7 +108,7 @@ def _set_partition_key(self, value): :type value: str or bytes """ annotations = dict(self._annotations) - annotations[self._partition_key] = value + annotations[EventData.PROP_PARTITION_KEY_AMQP_SYMBOL] = value header = MessageHeader() header.durable = True self.message.annotations = annotations @@ -119,7 +119,7 @@ def _set_partition_key(self, value): def _from_message(message): event_data = EventData(body='') event_data.message = message - event_data.msg_properties = message.properties + event_data._msg_properties = message.properties 
event_data._annotations = message.annotations event_data._app_properties = message.application_properties return event_data @@ -175,7 +175,7 @@ def partition_key(self): :rtype: bytes """ try: - return self._annotations[self._partition_key] + return self._annotations[EventData.PROP_PARTITION_KEY_AMQP_SYMBOL] except KeyError: return self._annotations.get(EventData.PROP_PARTITION_KEY, None) From 8bbac25cb02e7d50782e8d476a02b0dae914e79e Mon Sep 17 00:00:00 2001 From: yijxie Date: Mon, 26 Aug 2019 19:15:07 -0700 Subject: [PATCH 03/51] remove abstract method --- .../azure-eventhubs/azure/eventhub/client_abstract.py | 10 +--------- 1 file changed, 1 insertion(+), 9 deletions(-) diff --git a/sdk/eventhub/azure-eventhubs/azure/eventhub/client_abstract.py b/sdk/eventhub/azure-eventhubs/azure/eventhub/client_abstract.py index c26969bb5094..030e85f39896 100644 --- a/sdk/eventhub/azure-eventhubs/azure/eventhub/client_abstract.py +++ b/sdk/eventhub/azure-eventhubs/azure/eventhub/client_abstract.py @@ -266,7 +266,7 @@ def from_connection_string(cls, conn_str, **kwargs): :caption: Create an EventHubClient from a connection string. 
""" - event_hub_path = kwargs.pop("event_hub_path") + event_hub_path = kwargs.pop("event_hub_path", None) is_iot_conn_str = conn_str.lstrip().lower().startswith("hostname") if not is_iot_conn_str: address, policy, key, entity = _parse_conn_str(conn_str) @@ -279,11 +279,3 @@ def from_connection_string(cls, conn_str, **kwargs): return cls(host, entity, EventHubSharedKeyCredential(policy, key), **kwargs) else: return cls._from_iothub_connection_string(conn_str, **kwargs) - - @abstractmethod - def create_consumer(self, consumer_group, partition_id, event_position, **kwargs): - pass - - @abstractmethod - def create_producer(self, partition_id=None, operation=None, send_timeout=None): - pass From 70a33d068f36f5883be33e2b460d66e8088dec01 Mon Sep 17 00:00:00 2001 From: yijxie Date: Wed, 28 Aug 2019 14:10:48 -0700 Subject: [PATCH 04/51] code clean 1 --- sdk/eventhub/azure-eventhubs/azure/__init__.py | 2 +- .../azure/eventhub/_connection_manager.py | 2 +- .../azure/eventhub/_consumer_producer_mixin.py | 2 +- .../azure/eventhub/aio/_connection_manager_async.py | 4 ++-- .../eventhub/aio/_consumer_producer_mixin_async.py | 2 +- .../azure/eventhub/aio/producer_async.py | 2 +- sdk/eventhub/azure-eventhubs/azure/eventhub/client.py | 10 +++++----- .../azure-eventhubs/azure/eventhub/client_abstract.py | 5 +++-- sdk/eventhub/azure-eventhubs/azure/eventhub/common.py | 4 ++-- .../azure-eventhubs/azure/eventhub/configuration.py | 2 +- sdk/eventhub/azure-eventhubs/azure/eventhub/error.py | 2 +- .../azure-eventhubs/azure/eventhub/producer.py | 2 +- 12 files changed, 20 insertions(+), 19 deletions(-) diff --git a/sdk/eventhub/azure-eventhubs/azure/__init__.py b/sdk/eventhub/azure-eventhubs/azure/__init__.py index 66c5d46008f7..62351a0ab30b 100644 --- a/sdk/eventhub/azure-eventhubs/azure/__init__.py +++ b/sdk/eventhub/azure-eventhubs/azure/__init__.py @@ -2,4 +2,4 @@ # Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. 
See License.txt in the project root for license information. # -------------------------------------------------------------------------------------------- -__path__ = __import__('pkgutil').extend_path(__path__, __name__) +__path__ = __import__('pkgutil').extend_path(__path__, __name__) # type: ignore diff --git a/sdk/eventhub/azure-eventhubs/azure/eventhub/_connection_manager.py b/sdk/eventhub/azure-eventhubs/azure/eventhub/_connection_manager.py index 166703d698e7..505b198ff910 100644 --- a/sdk/eventhub/azure-eventhubs/azure/eventhub/_connection_manager.py +++ b/sdk/eventhub/azure-eventhubs/azure/eventhub/_connection_manager.py @@ -4,7 +4,7 @@ # -------------------------------------------------------------------------------------------- from threading import RLock -from uamqp import Connection, TransportType, c_uamqp +from uamqp import Connection, TransportType, c_uamqp # type: ignore class _SharedConnectionManager(object): diff --git a/sdk/eventhub/azure-eventhubs/azure/eventhub/_consumer_producer_mixin.py b/sdk/eventhub/azure-eventhubs/azure/eventhub/_consumer_producer_mixin.py index 8e3cae1e8a0b..0c29fc4b1ad9 100644 --- a/sdk/eventhub/azure-eventhubs/azure/eventhub/_consumer_producer_mixin.py +++ b/sdk/eventhub/azure-eventhubs/azure/eventhub/_consumer_producer_mixin.py @@ -119,7 +119,7 @@ def close(self, exception=None): """ self.running = False - if self.error: + if self.error: # type: ignore return if isinstance(exception, errors.LinkRedirect): self.redirected = exception diff --git a/sdk/eventhub/azure-eventhubs/azure/eventhub/aio/_connection_manager_async.py b/sdk/eventhub/azure-eventhubs/azure/eventhub/aio/_connection_manager_async.py index 618359192ffe..6b70f72cbe3e 100644 --- a/sdk/eventhub/azure-eventhubs/azure/eventhub/aio/_connection_manager_async.py +++ b/sdk/eventhub/azure-eventhubs/azure/eventhub/aio/_connection_manager_async.py @@ -4,8 +4,8 @@ # -------------------------------------------------------------------------------------------- from 
asyncio import Lock -from uamqp import TransportType, c_uamqp -from uamqp.async_ops import ConnectionAsync +from uamqp import TransportType, c_uamqp # type: ignore +from uamqp.async_ops import ConnectionAsync # type: ignore class _SharedConnectionManager(object): diff --git a/sdk/eventhub/azure-eventhubs/azure/eventhub/aio/_consumer_producer_mixin_async.py b/sdk/eventhub/azure-eventhubs/azure/eventhub/aio/_consumer_producer_mixin_async.py index 8ccdcdddcd47..0b94893848bf 100644 --- a/sdk/eventhub/azure-eventhubs/azure/eventhub/aio/_consumer_producer_mixin_async.py +++ b/sdk/eventhub/azure-eventhubs/azure/eventhub/aio/_consumer_producer_mixin_async.py @@ -120,7 +120,7 @@ async def close(self, exception=None): """ self.running = False - if self.error: + if self.error: #type: ignore return if isinstance(exception, errors.LinkRedirect): self.redirected = exception diff --git a/sdk/eventhub/azure-eventhubs/azure/eventhub/aio/producer_async.py b/sdk/eventhub/azure-eventhubs/azure/eventhub/aio/producer_async.py index c45cf9a6b283..7a911be27ab0 100644 --- a/sdk/eventhub/azure-eventhubs/azure/eventhub/aio/producer_async.py +++ b/sdk/eventhub/azure-eventhubs/azure/eventhub/aio/producer_async.py @@ -224,7 +224,7 @@ async def send(self, event_data, *, partition_key=None, timeout=None): if isinstance(event_data, EventDataBatch): if partition_key and not (partition_key == event_data._partition_key): # pylint: disable=protected-access raise EventDataError('The partition_key does not match the one of the EventDataBatch') - wrapper_event_data = event_data + wrapper_event_data = event_data #type: ignore else: if partition_key: event_data = _set_partition_key(event_data, partition_key) diff --git a/sdk/eventhub/azure-eventhubs/azure/eventhub/client.py b/sdk/eventhub/azure-eventhubs/azure/eventhub/client.py index 4513cf900917..c6d0ced931b2 100644 --- a/sdk/eventhub/azure-eventhubs/azure/eventhub/client.py +++ b/sdk/eventhub/azure-eventhubs/azure/eventhub/client.py @@ -9,15 +9,15 @@ 
import functools try: from urlparse import urlparse - from urllib import unquote_plus, urlencode, quote_plus + from urllib import unquote_plus, urlencode, quote_plus # type: ignore except ImportError: from urllib.parse import urlparse, unquote_plus, urlencode, quote_plus from typing import Any, List, Dict -import uamqp -from uamqp import Message -from uamqp import authentication -from uamqp import constants +import uamqp # type: ignore +from uamqp import Message # type: ignore +from uamqp import authentication # type: ignore +from uamqp import constants # type: ignore from azure.eventhub.producer import EventHubProducer from azure.eventhub.consumer import EventHubConsumer diff --git a/sdk/eventhub/azure-eventhubs/azure/eventhub/client_abstract.py b/sdk/eventhub/azure-eventhubs/azure/eventhub/client_abstract.py index 030e85f39896..32b9993091da 100644 --- a/sdk/eventhub/azure-eventhubs/azure/eventhub/client_abstract.py +++ b/sdk/eventhub/azure-eventhubs/azure/eventhub/client_abstract.py @@ -10,9 +10,10 @@ import time import functools from abc import abstractmethod +from typing import Dict try: from urlparse import urlparse - from urllib import unquote_plus, urlencode, quote_plus + from urllib import unquote_plus, urlencode, quote_plus # type: ignore except ImportError: from urllib.parse import urlparse, unquote_plus, urlencode, quote_plus @@ -140,7 +141,7 @@ def __init__(self, host, event_hub_path, credential, **kwargs): self.address = _Address() self.address.hostname = host self.address.path = "/" + event_hub_path if event_hub_path else "" - self._auth_config = {} + self._auth_config = {} # type:Dict[str,str] self.credential = credential if isinstance(credential, EventHubSharedKeyCredential): self.username = credential.policy diff --git a/sdk/eventhub/azure-eventhubs/azure/eventhub/common.py b/sdk/eventhub/azure-eventhubs/azure/eventhub/common.py index ea609aa213bd..cb8e1a7103b1 100644 --- a/sdk/eventhub/azure-eventhubs/azure/eventhub/common.py +++ 
b/sdk/eventhub/azure-eventhubs/azure/eventhub/common.py @@ -11,8 +11,8 @@ import logging from azure.eventhub.error import EventDataError -from uamqp import BatchMessage, Message, types, constants -from uamqp.message import MessageHeader, MessageProperties +from uamqp import BatchMessage, Message, types, constants # type: ignore +from uamqp.message import MessageHeader, MessageProperties # type: ignore log = logging.getLogger(__name__) diff --git a/sdk/eventhub/azure-eventhubs/azure/eventhub/configuration.py b/sdk/eventhub/azure-eventhubs/azure/eventhub/configuration.py index 1de21811f2d3..5be9df85c732 100644 --- a/sdk/eventhub/azure-eventhubs/azure/eventhub/configuration.py +++ b/sdk/eventhub/azure-eventhubs/azure/eventhub/configuration.py @@ -2,7 +2,7 @@ # Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. See License.txt in the project root for license information. # -------------------------------------------------------------------------------------------- -from uamqp.constants import TransportType +from uamqp.constants import TransportType # type: ignore class _Configuration(object): diff --git a/sdk/eventhub/azure-eventhubs/azure/eventhub/error.py b/sdk/eventhub/azure-eventhubs/azure/eventhub/error.py index cbe6a8a04946..f48be4e557ea 100644 --- a/sdk/eventhub/azure-eventhubs/azure/eventhub/error.py +++ b/sdk/eventhub/azure-eventhubs/azure/eventhub/error.py @@ -6,7 +6,7 @@ import time import logging -from uamqp import constants, errors, compat +from uamqp import constants, errors, compat # type: ignore _NO_RETRY_ERRORS = ( diff --git a/sdk/eventhub/azure-eventhubs/azure/eventhub/producer.py b/sdk/eventhub/azure-eventhubs/azure/eventhub/producer.py index 8655b16bd451..8a7ed5349e90 100644 --- a/sdk/eventhub/azure-eventhubs/azure/eventhub/producer.py +++ b/sdk/eventhub/azure-eventhubs/azure/eventhub/producer.py @@ -231,7 +231,7 @@ def send(self, event_data, partition_key=None, timeout=None): if isinstance(event_data, 
EventDataBatch): # The partition_key in the param will be omitted. if partition_key and not (partition_key == event_data._partition_key): # pylint: disable=protected-access raise EventDataError('The partition_key does not match the one of the EventDataBatch') - wrapper_event_data = event_data + wrapper_event_data = event_data # type:ignore else: if partition_key: event_data = _set_partition_key(event_data, partition_key) From abbdd252e361d76dae951736d4d0ff9f6761abcb Mon Sep 17 00:00:00 2001 From: yijxie Date: Wed, 28 Aug 2019 14:20:37 -0700 Subject: [PATCH 05/51] code clean 2 --- sdk/eventhub/azure-eventhubs/azure/eventhub/__init__.py | 2 +- .../azure/eventhub/_consumer_producer_mixin.py | 2 +- .../azure/eventhub/aio/_consumer_producer_mixin_async.py | 2 +- .../azure-eventhubs/azure/eventhub/aio/client_async.py | 4 ++-- .../azure-eventhubs/azure/eventhub/aio/consumer_async.py | 4 ++-- .../azure-eventhubs/azure/eventhub/aio/error_async.py | 2 +- .../azure-eventhubs/azure/eventhub/aio/producer_async.py | 4 ++-- sdk/eventhub/azure-eventhubs/azure/eventhub/client.py | 2 +- .../azure-eventhubs/azure/eventhub/client_abstract.py | 4 ++-- sdk/eventhub/azure-eventhubs/azure/eventhub/consumer.py | 4 ++-- sdk/eventhub/azure-eventhubs/azure/eventhub/producer.py | 4 ++-- 11 files changed, 17 insertions(+), 17 deletions(-) diff --git a/sdk/eventhub/azure-eventhubs/azure/eventhub/__init__.py b/sdk/eventhub/azure-eventhubs/azure/eventhub/__init__.py index 0ccd9d5ec73c..74627c8bf854 100644 --- a/sdk/eventhub/azure-eventhubs/azure/eventhub/__init__.py +++ b/sdk/eventhub/azure-eventhubs/azure/eventhub/__init__.py @@ -11,7 +11,7 @@ from azure.eventhub.client import EventHubClient from azure.eventhub.producer import EventHubProducer from azure.eventhub.consumer import EventHubConsumer -from uamqp import constants +from uamqp import constants # type: ignore from .common import EventHubSharedKeyCredential, EventHubSASTokenCredential TransportType = constants.TransportType diff --git 
a/sdk/eventhub/azure-eventhubs/azure/eventhub/_consumer_producer_mixin.py b/sdk/eventhub/azure-eventhubs/azure/eventhub/_consumer_producer_mixin.py index 0c29fc4b1ad9..108d3772eeee 100644 --- a/sdk/eventhub/azure-eventhubs/azure/eventhub/_consumer_producer_mixin.py +++ b/sdk/eventhub/azure-eventhubs/azure/eventhub/_consumer_producer_mixin.py @@ -7,7 +7,7 @@ import logging import time -from uamqp import errors, constants, compat +from uamqp import errors, constants, compat # type: ignore from azure.eventhub.error import EventHubError, _handle_exception log = logging.getLogger(__name__) diff --git a/sdk/eventhub/azure-eventhubs/azure/eventhub/aio/_consumer_producer_mixin_async.py b/sdk/eventhub/azure-eventhubs/azure/eventhub/aio/_consumer_producer_mixin_async.py index 0b94893848bf..49a4e071f921 100644 --- a/sdk/eventhub/azure-eventhubs/azure/eventhub/aio/_consumer_producer_mixin_async.py +++ b/sdk/eventhub/azure-eventhubs/azure/eventhub/aio/_consumer_producer_mixin_async.py @@ -6,7 +6,7 @@ import logging import time -from uamqp import errors, constants, compat +from uamqp import errors, constants, compat # type: ignore from azure.eventhub.error import EventHubError, ConnectError from ..aio.error_async import _handle_exception diff --git a/sdk/eventhub/azure-eventhubs/azure/eventhub/aio/client_async.py b/sdk/eventhub/azure-eventhubs/azure/eventhub/aio/client_async.py index a9fdb32381ef..fa03a3d5324d 100644 --- a/sdk/eventhub/azure-eventhubs/azure/eventhub/aio/client_async.py +++ b/sdk/eventhub/azure-eventhubs/azure/eventhub/aio/client_async.py @@ -8,11 +8,11 @@ import asyncio from typing import Any, List, Dict -from uamqp import authentication, constants +from uamqp import authentication, constants # type: ignore from uamqp import ( Message, AMQPClientAsync, -) +) # type: ignore from azure.eventhub.common import parse_sas_token, EventPosition, EventHubSharedKeyCredential, EventHubSASTokenCredential from ..client_abstract import EventHubClientAbstract diff --git 
a/sdk/eventhub/azure-eventhubs/azure/eventhub/aio/consumer_async.py b/sdk/eventhub/azure-eventhubs/azure/eventhub/aio/consumer_async.py index 404fc23312f0..2814b0c54b4a 100644 --- a/sdk/eventhub/azure-eventhubs/azure/eventhub/aio/consumer_async.py +++ b/sdk/eventhub/azure-eventhubs/azure/eventhub/aio/consumer_async.py @@ -8,8 +8,8 @@ from typing import List import time -from uamqp import errors, types -from uamqp import ReceiveClientAsync, Source +from uamqp import errors, types # type: ignore +from uamqp import ReceiveClientAsync, Source # type: ignore from azure.eventhub import EventData, EventPosition from azure.eventhub.error import EventHubError, ConnectError, _error_handler diff --git a/sdk/eventhub/azure-eventhubs/azure/eventhub/aio/error_async.py b/sdk/eventhub/azure-eventhubs/azure/eventhub/aio/error_async.py index b44f8cb54a33..51d5ac8ad0f1 100644 --- a/sdk/eventhub/azure-eventhubs/azure/eventhub/aio/error_async.py +++ b/sdk/eventhub/azure-eventhubs/azure/eventhub/aio/error_async.py @@ -2,7 +2,7 @@ import time import logging -from uamqp import errors, compat +from uamqp import errors, compat # type: ignore from ..error import EventHubError, EventDataSendError, \ EventDataError, ConnectError, ConnectionLostError, AuthenticationError diff --git a/sdk/eventhub/azure-eventhubs/azure/eventhub/aio/producer_async.py b/sdk/eventhub/azure-eventhubs/azure/eventhub/aio/producer_async.py index 7a911be27ab0..96be7e90ed15 100644 --- a/sdk/eventhub/azure-eventhubs/azure/eventhub/aio/producer_async.py +++ b/sdk/eventhub/azure-eventhubs/azure/eventhub/aio/producer_async.py @@ -8,8 +8,8 @@ from typing import Iterable, Union import time -from uamqp import types, constants, errors -from uamqp import SendClientAsync +from uamqp import types, constants, errors # type: ignore +from uamqp import SendClientAsync # type: ignore from azure.eventhub.common import EventData, EventDataBatch from azure.eventhub.error import _error_handler, OperationTimeoutError, EventDataError diff 
--git a/sdk/eventhub/azure-eventhubs/azure/eventhub/client.py b/sdk/eventhub/azure-eventhubs/azure/eventhub/client.py index c6d0ced931b2..872ba98f48e0 100644 --- a/sdk/eventhub/azure-eventhubs/azure/eventhub/client.py +++ b/sdk/eventhub/azure-eventhubs/azure/eventhub/client.py @@ -8,7 +8,7 @@ import datetime import functools try: - from urlparse import urlparse + from urlparse import urlparse # type: ignore from urllib import unquote_plus, urlencode, quote_plus # type: ignore except ImportError: from urllib.parse import urlparse, unquote_plus, urlencode, quote_plus diff --git a/sdk/eventhub/azure-eventhubs/azure/eventhub/client_abstract.py b/sdk/eventhub/azure-eventhubs/azure/eventhub/client_abstract.py index 32b9993091da..d861b385336c 100644 --- a/sdk/eventhub/azure-eventhubs/azure/eventhub/client_abstract.py +++ b/sdk/eventhub/azure-eventhubs/azure/eventhub/client_abstract.py @@ -12,7 +12,7 @@ from abc import abstractmethod from typing import Dict try: - from urlparse import urlparse + from urlparse import urlparse # type: ignore from urllib import unquote_plus, urlencode, quote_plus # type: ignore except ImportError: from urllib.parse import urlparse, unquote_plus, urlencode, quote_plus @@ -22,7 +22,7 @@ except ImportError: TYPE_CHECKING = False if TYPE_CHECKING: - from azure.core.credentials import TokenCredential + from azure.core.credentials import TokenCredential # type: ignore from typing import Union, Any from azure.eventhub import __version__ diff --git a/sdk/eventhub/azure-eventhubs/azure/eventhub/consumer.py b/sdk/eventhub/azure-eventhubs/azure/eventhub/consumer.py index 1f2b6db17728..883840d7d720 100644 --- a/sdk/eventhub/azure-eventhubs/azure/eventhub/consumer.py +++ b/sdk/eventhub/azure-eventhubs/azure/eventhub/consumer.py @@ -9,8 +9,8 @@ import time from typing import List -from uamqp import types, errors -from uamqp import ReceiveClient, Source +from uamqp import types, errors # type: ignore +from uamqp import ReceiveClient, Source # type: ignore 
from azure.eventhub.common import EventData, EventPosition from azure.eventhub.error import _error_handler diff --git a/sdk/eventhub/azure-eventhubs/azure/eventhub/producer.py b/sdk/eventhub/azure-eventhubs/azure/eventhub/producer.py index 8a7ed5349e90..96c647909043 100644 --- a/sdk/eventhub/azure-eventhubs/azure/eventhub/producer.py +++ b/sdk/eventhub/azure-eventhubs/azure/eventhub/producer.py @@ -9,8 +9,8 @@ import time from typing import Iterable, Union -from uamqp import types, constants, errors -from uamqp import SendClient +from uamqp import types, constants, errors # type: ignore +from uamqp import SendClient # type: ignore from azure.eventhub.common import EventData, EventDataBatch from azure.eventhub.error import _error_handler, OperationTimeoutError, EventDataError From b45d6b3647403b41791d625b2b683d18cd5f7838 Mon Sep 17 00:00:00 2001 From: yijxie Date: Wed, 28 Aug 2019 17:35:35 -0700 Subject: [PATCH 06/51] Fix pylint --- .../azure/eventhub/__init__.py | 3 +- .../azure/eventhub/_connection_manager.py | 14 ++++---- .../eventhub/_consumer_producer_mixin.py | 6 ++-- .../eventhub/aio/_connection_manager_async.py | 12 +++---- .../aio/_consumer_producer_mixin_async.py | 10 +++--- .../azure/eventhub/aio/client_async.py | 31 ++++++++++++----- .../azure/eventhub/aio/consumer_async.py | 20 +++++------ .../azure/eventhub/aio/error_async.py | 32 +++++++++-------- .../azure/eventhub/aio/producer_async.py | 4 +-- .../azure-eventhubs/azure/eventhub/client.py | 19 +++++------ .../azure/eventhub/client_abstract.py | 25 ++++++-------- .../azure-eventhubs/azure/eventhub/common.py | 20 +++++------ .../azure/eventhub/configuration.py | 2 +- .../azure/eventhub/consumer.py | 20 +++++------ .../azure-eventhubs/azure/eventhub/error.py | 34 ++++++++----------- .../azure/eventhub/producer.py | 13 ++++--- 16 files changed, 136 insertions(+), 129 deletions(-) diff --git a/sdk/eventhub/azure-eventhubs/azure/eventhub/__init__.py b/sdk/eventhub/azure-eventhubs/azure/eventhub/__init__.py 
index 74627c8bf854..040d00c947d8 100644 --- a/sdk/eventhub/azure-eventhubs/azure/eventhub/__init__.py +++ b/sdk/eventhub/azure-eventhubs/azure/eventhub/__init__.py @@ -4,14 +4,13 @@ # -------------------------------------------------------------------------------------------- __version__ = "5.0.0b2" - +from uamqp import constants # type: ignore from azure.eventhub.common import EventData, EventDataBatch, EventPosition from azure.eventhub.error import EventHubError, EventDataError, ConnectError, \ AuthenticationError, EventDataSendError, ConnectionLostError from azure.eventhub.client import EventHubClient from azure.eventhub.producer import EventHubProducer from azure.eventhub.consumer import EventHubConsumer -from uamqp import constants # type: ignore from .common import EventHubSharedKeyCredential, EventHubSASTokenCredential TransportType = constants.TransportType diff --git a/sdk/eventhub/azure-eventhubs/azure/eventhub/_connection_manager.py b/sdk/eventhub/azure-eventhubs/azure/eventhub/_connection_manager.py index 505b198ff910..77c12a376f97 100644 --- a/sdk/eventhub/azure-eventhubs/azure/eventhub/_connection_manager.py +++ b/sdk/eventhub/azure-eventhubs/azure/eventhub/_connection_manager.py @@ -7,7 +7,7 @@ from uamqp import Connection, TransportType, c_uamqp # type: ignore -class _SharedConnectionManager(object): +class _SharedConnectionManager(object): #pylint:disable=too-many-instance-attributes def __init__(self, **kwargs): self._lock = RLock() self._conn = None # type: Connection @@ -50,11 +50,11 @@ def close_connection(self): def reset_connection_if_broken(self): with self._lock: - if self._conn and self._conn._state in ( - c_uamqp.ConnectionState.CLOSE_RCVD, - c_uamqp.ConnectionState.CLOSE_SENT, - c_uamqp.ConnectionState.DISCARDING, - c_uamqp.ConnectionState.END, + if self._conn and self._conn._state in ( # pylint:disable=protected-access + c_uamqp.ConnectionState.CLOSE_RCVD, # pylint:disable=c-extension-no-member + c_uamqp.ConnectionState.CLOSE_SENT, # 
pylint:disable=c-extension-no-member + c_uamqp.ConnectionState.DISCARDING, # pylint:disable=c-extension-no-member + c_uamqp.ConnectionState.END, # pylint:disable=c-extension-no-member ): self._conn = None @@ -63,7 +63,7 @@ class _SeparateConnectionManager(object): def __init__(self, **kwargs): pass - def get_connection(self, host, auth): + def get_connection(self, host, auth): # pylint:disable=unused-argument, no-self-use return None def close_connection(self): diff --git a/sdk/eventhub/azure-eventhubs/azure/eventhub/_consumer_producer_mixin.py b/sdk/eventhub/azure-eventhubs/azure/eventhub/_consumer_producer_mixin.py index 108d3772eeee..837aaab26d78 100644 --- a/sdk/eventhub/azure-eventhubs/azure/eventhub/_consumer_producer_mixin.py +++ b/sdk/eventhub/azure-eventhubs/azure/eventhub/_consumer_producer_mixin.py @@ -25,8 +25,8 @@ def wrapped_func(self, *args, **kwargs): while True: try: return to_be_wrapped_func(self, timeout_time=timeout_time, last_exception=last_exception, **kwargs) - except Exception as exception: - last_exception = self._handle_exception(exception, retry_count, max_retries, timeout_time) + except Exception as exception: # pylint:disable=broad-except + last_exception = self._handle_exception(exception, retry_count, max_retries, timeout_time) # pylint:disable=protected-access retry_count += 1 return wrapped_func @@ -89,7 +89,7 @@ def _close_handler(self): def _close_connection(self): self._close_handler() - self.client._conn_manager.reset_connection_if_broken() + self.client._conn_manager.reset_connection_if_broken() # pylint: disable=protected-access def _handle_exception(self, exception, retry_count, max_retries, timeout_time): if not self.running and isinstance(exception, compat.TimeoutException): diff --git a/sdk/eventhub/azure-eventhubs/azure/eventhub/aio/_connection_manager_async.py b/sdk/eventhub/azure-eventhubs/azure/eventhub/aio/_connection_manager_async.py index 6b70f72cbe3e..2b38f2fd220e 100644 --- 
a/sdk/eventhub/azure-eventhubs/azure/eventhub/aio/_connection_manager_async.py +++ b/sdk/eventhub/azure-eventhubs/azure/eventhub/aio/_connection_manager_async.py @@ -8,7 +8,7 @@ from uamqp.async_ops import ConnectionAsync # type: ignore -class _SharedConnectionManager(object): +class _SharedConnectionManager(object): # pylint:disable=too-many-instance-attributes def __init__(self, **kwargs): self._lock = Lock() self._conn = None @@ -51,11 +51,11 @@ async def close_connection(self): async def reset_connection_if_broken(self): async with self._lock: - if self._conn and self._conn._state in ( - c_uamqp.ConnectionState.CLOSE_RCVD, - c_uamqp.ConnectionState.CLOSE_SENT, - c_uamqp.ConnectionState.DISCARDING, - c_uamqp.ConnectionState.END, + if self._conn and self._conn._state in ( # pylint:disable=protected-access + c_uamqp.ConnectionState.CLOSE_RCVD, # pylint:disable=c-extension-no-member + c_uamqp.ConnectionState.CLOSE_SENT, # pylint:disable=c-extension-no-member + c_uamqp.ConnectionState.DISCARDING, # pylint:disable=c-extension-no-member + c_uamqp.ConnectionState.END, # pylint:disable=c-extension-no-member ): self._conn = None diff --git a/sdk/eventhub/azure-eventhubs/azure/eventhub/aio/_consumer_producer_mixin_async.py b/sdk/eventhub/azure-eventhubs/azure/eventhub/aio/_consumer_producer_mixin_async.py index 49a4e071f921..0ca5889aa972 100644 --- a/sdk/eventhub/azure-eventhubs/azure/eventhub/aio/_consumer_producer_mixin_async.py +++ b/sdk/eventhub/azure-eventhubs/azure/eventhub/aio/_consumer_producer_mixin_async.py @@ -24,8 +24,10 @@ async def wrapped_func(self, *args, **kwargs): last_exception = None while True: try: - return await to_be_wrapped_func(self, timeout_time=timeout_time, last_exception=last_exception, **kwargs) - except Exception as exception: + return await to_be_wrapped_func( + self, timeout_time=timeout_time, last_exception=last_exception, **kwargs + ) + except Exception as exception: # pylint:disable=broad-except last_exception = await 
self._handle_exception(exception, retry_count, max_retries, timeout_time) retry_count += 1 return wrapped_func @@ -90,7 +92,7 @@ async def _close_handler(self): async def _close_connection(self): await self._close_handler() - await self.client._conn_manager.reset_connection_if_broken() + await self.client._conn_manager.reset_connection_if_broken() # pylint:disable=protected-access async def _handle_exception(self, exception, retry_count, max_retries, timeout_time): if not self.running and isinstance(exception, compat.TimeoutException): @@ -133,4 +135,4 @@ async def close(self, exception=None): else: self.error = EventHubError("This receive handler is now closed.") if self._handler: - await self._handler.close_async() \ No newline at end of file + await self._handler.close_async() diff --git a/sdk/eventhub/azure-eventhubs/azure/eventhub/aio/client_async.py b/sdk/eventhub/azure-eventhubs/azure/eventhub/aio/client_async.py index fa03a3d5324d..1c02466a9add 100644 --- a/sdk/eventhub/azure-eventhubs/azure/eventhub/aio/client_async.py +++ b/sdk/eventhub/azure-eventhubs/azure/eventhub/aio/client_async.py @@ -6,7 +6,7 @@ import datetime import functools import asyncio -from typing import Any, List, Dict +from typing import Any, List, Dict, Union, TYPE_CHECKING from uamqp import authentication, constants # type: ignore from uamqp import ( @@ -14,7 +14,8 @@ AMQPClientAsync, ) # type: ignore -from azure.eventhub.common import parse_sas_token, EventPosition, EventHubSharedKeyCredential, EventHubSASTokenCredential +from azure.eventhub.common import parse_sas_token, EventPosition, \ + EventHubSharedKeyCredential, EventHubSASTokenCredential from ..client_abstract import EventHubClientAbstract from .producer_async import EventHubProducer @@ -22,6 +23,8 @@ from ._connection_manager_async import get_connection_manager from .error_async import _handle_exception +if TYPE_CHECKING: + from azure.core.credentials import TokenCredential # type: ignore log = logging.getLogger(__name__) @@ 
-42,7 +45,9 @@ class EventHubClient(EventHubClientAbstract): """ def __init__(self, host, event_hub_path, credential, **kwargs): - super(EventHubClient, self).__init__(host, event_hub_path, credential, **kwargs) + # type:(str, str, Union[EventHubSharedKeyCredential, EventHubSASTokenCredential, TokenCredential], ...) -> None + + super(EventHubClient, self).__init__(host=host, event_hub_path=event_hub_path, credential=credential, **kwargs) self._conn_manager = get_connection_manager(**kwargs) async def __aenter__(self): @@ -65,7 +70,7 @@ def _create_auth(self, username=None, password=None): transport_type = self.config.transport_type auth_timeout = self.config.auth_timeout - if isinstance(self.credential, EventHubSharedKeyCredential): + if isinstance(self.credential, EventHubSharedKeyCredential): # pylint:disable=no-else-return username = username or self._auth_config['username'] password = password or self._auth_config['password'] if "@sas.root" in username: @@ -116,7 +121,7 @@ async def _management_request(self, mgmt_msg, op_type): status_code_field=b'status-code', description_fields=b'status-description') return response - except Exception as exception: + except Exception as exception: # pylint:disable=broad-except await self._handle_exception(exception, retry_count, max_retries) retry_count += 1 finally: @@ -190,7 +195,12 @@ async def get_partition_properties(self, partition): output['is_empty'] = partition_info[b'is_partition_empty'] return output - def create_consumer(self, consumer_group: str, partition_id: str, event_position: EventPosition, **kwargs): + def create_consumer( + self, + consumer_group: str, + partition_id: str, + event_position: EventPosition, **kwargs + ) -> EventHubConsumer: """ Create an async consumer to the client for a particular consumer group and partition. 
@@ -234,8 +244,13 @@ def create_consumer(self, consumer_group: str, partition_id: str, event_position prefetch=prefetch, loop=loop) return handler - def create_producer(self, *, partition_id=None, operation=None, send_timeout=None, loop=None): - # type: (str, str, float, asyncio.AbstractEventLoop) -> EventHubProducer + def create_producer( + self, *, + partition_id: str = None, + operation: str = None, + send_timeout: float = None, + loop: asyncio.AbstractEventLoop = None + ) -> EventHubProducer: """ Create an async producer to send EventData object to an EventHub. diff --git a/sdk/eventhub/azure-eventhubs/azure/eventhub/aio/consumer_async.py b/sdk/eventhub/azure-eventhubs/azure/eventhub/aio/consumer_async.py index 2814b0c54b4a..3747d1af4d9d 100644 --- a/sdk/eventhub/azure-eventhubs/azure/eventhub/aio/consumer_async.py +++ b/sdk/eventhub/azure-eventhubs/azure/eventhub/aio/consumer_async.py @@ -18,7 +18,7 @@ log = logging.getLogger(__name__) -class EventHubConsumer(ConsumerProducerMixin): +class EventHubConsumer(ConsumerProducerMixin): # pylint:disable=too-many-instance-attributes """ A consumer responsible for reading EventData from a specific Event Hub partition and as a member of a specific consumer group. 
@@ -100,21 +100,21 @@ async def __anext__(self): if not self.messages_iter: self.messages_iter = self._handler.receive_messages_iter_async() message = await self.messages_iter.__anext__() - event_data = EventData._from_message(message) + event_data = EventData._from_message(message) # pylint:disable=protected-access self.offset = EventPosition(event_data.offset, inclusive=False) retry_count = 0 return event_data - except Exception as exception: - await self._handle_exception(exception, retry_count, max_retries) + except Exception as exception: # pylint:disable=broad-except + await self._handle_exception(exception, retry_count, max_retries, timeout_time=None) retry_count += 1 def _create_handler(self): alt_creds = { - "username": self.client._auth_config.get("iot_username"), - "password": self.client._auth_config.get("iot_password")} + "username": self.client._auth_config.get("iot_username"), # pylint:disable=protected-access + "password": self.client._auth_config.get("iot_password")} # pylint:disable=protected-access source = Source(self.source) if self.offset is not None: - source.set_filter(self.offset._selector()) + source.set_filter(self.offset._selector()) # pylint:disable=protected-access self._handler = ReceiveClientAsync( source, auth=self.client.get_auth(**alt_creds), @@ -125,8 +125,8 @@ def _create_handler(self): error_policy=self.retry_policy, keep_alive_interval=self.keep_alive, client_name=self.name, - properties=self.client._create_properties( - self.client.config.user_agent), # pylint: disable=protected-access + properties=self.client._create_properties( # pylint:disable=protected-access + self.client.config.user_agent), loop=self.loop) self.messages_iter = None @@ -164,7 +164,7 @@ async def _receive(self, timeout_time=None, max_batch_size=None, **kwargs): max_batch_size=max_batch_size, timeout=remaining_time_ms) for message in message_batch: - event_data = EventData._from_message(message) + event_data = EventData._from_message(message) # 
pylint:disable=protected-access self.offset = EventPosition(event_data.offset) data_batch.append(event_data) return data_batch diff --git a/sdk/eventhub/azure-eventhubs/azure/eventhub/aio/error_async.py b/sdk/eventhub/azure-eventhubs/azure/eventhub/aio/error_async.py index 51d5ac8ad0f1..5d0cff3ebc1d 100644 --- a/sdk/eventhub/azure-eventhubs/azure/eventhub/aio/error_async.py +++ b/sdk/eventhub/azure-eventhubs/azure/eventhub/aio/error_async.py @@ -1,3 +1,7 @@ +# -------------------------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# -------------------------------------------------------------------------------------------- import asyncio import time import logging @@ -32,20 +36,20 @@ def _create_eventhub_exception(exception): return error -async def _handle_exception(exception, retry_count, max_retries, closable, timeout_time=None): +async def _handle_exception(exception, retry_count, max_retries, closable, timeout_time=None): # pylint:disable=too-many-branches, too-many-statements if isinstance(exception, asyncio.CancelledError): - raise + raise exception try: name = closable.name except AttributeError: name = closable.container_id - if isinstance(exception, KeyboardInterrupt): + if isinstance(exception, KeyboardInterrupt): # pylint:disable=no-else-raise log.info("%r stops due to keyboard interrupt", name) closable.close() - raise + raise exception elif isinstance(exception, EventHubError): closable.close() - raise + raise exception elif isinstance(exception, ( errors.MessageAccepted, errors.MessageAlreadySettled, @@ -68,29 +72,29 @@ async def _handle_exception(exception, retry_count, max_retries, closable, timeo else: if isinstance(exception, errors.AuthenticationException): if hasattr(closable, "_close_connection"): - await closable._close_connection() + await 
closable._close_connection() # pylint:disable=protected-access elif isinstance(exception, errors.LinkRedirect): log.info("%r link redirect received. Redirecting...", name) redirect = exception if hasattr(closable, "_redirect"): - await closable._redirect(redirect) + await closable._redirect(redirect) # pylint:disable=protected-access elif isinstance(exception, errors.LinkDetach): if hasattr(closable, "_close_handler"): - await closable._close_handler() + await closable._close_handler() # pylint:disable=protected-access elif isinstance(exception, errors.ConnectionClose): if hasattr(closable, "_close_connection"): - await closable._close_connection() + await closable._close_connection() # pylint:disable=protected-access elif isinstance(exception, errors.MessageHandlerError): if hasattr(closable, "_close_handler"): - await closable._close_handler() + await closable._close_handler() # pylint:disable=protected-access elif isinstance(exception, errors.AMQPConnectionError): if hasattr(closable, "_close_connection"): - await closable._close_connection() + await closable._close_connection() # pylint:disable=protected-access elif isinstance(exception, compat.TimeoutException): pass # Timeout doesn't need to recreate link or connection to retry else: if hasattr(closable, "_close_connection"): - await closable._close_connection() + await closable._close_connection() # pylint:disable=protected-access # start processing retry delay try: backoff_factor = closable.client.config.backoff_factor @@ -99,7 +103,7 @@ async def _handle_exception(exception, retry_count, max_retries, closable, timeo backoff_factor = closable.config.backoff_factor backoff_max = closable.config.backoff_max backoff = backoff_factor * 2 ** retry_count - if backoff <= backoff_max and (timeout_time is None or time.time() + backoff <= timeout_time): + if backoff <= backoff_max and (timeout_time is None or time.time() + backoff <= timeout_time): # pylint:disable=no-else-return await asyncio.sleep(backoff) 
log.info("%r has an exception (%r). Retrying...", format(name), exception) return _create_eventhub_exception(exception) @@ -107,4 +111,4 @@ async def _handle_exception(exception, retry_count, max_retries, closable, timeo error = _create_eventhub_exception(exception) log.info("%r operation has timed out. Last exception before timeout is (%r)", name, error) raise error - # end of processing retry delay \ No newline at end of file + # end of processing retry delay diff --git a/sdk/eventhub/azure-eventhubs/azure/eventhub/aio/producer_async.py b/sdk/eventhub/azure-eventhubs/azure/eventhub/aio/producer_async.py index 96be7e90ed15..1f4fde946cf2 100644 --- a/sdk/eventhub/azure-eventhubs/azure/eventhub/aio/producer_async.py +++ b/sdk/eventhub/azure-eventhubs/azure/eventhub/aio/producer_async.py @@ -5,7 +5,7 @@ import uuid import asyncio import logging -from typing import Iterable, Union +from typing import Iterable, Union, Any import time from uamqp import types, constants, errors # type: ignore @@ -185,7 +185,7 @@ async def create_batch(self, max_size=None, partition_key=None): return EventDataBatch(max_size=(max_size or self._max_message_size_on_link), partition_key=partition_key) async def send(self, event_data, *, partition_key=None, timeout=None): - # type:(Union[EventData, EventDataBatch, Iterable[EventData]], Union[str, bytes], float) -> None + # type:(Union[EventData, EventDataBatch, Iterable[EventData]],Any, Union[str, bytes], float) -> None """ Sends an event data and blocks until acknowledgement is received or operation times out. 
diff --git a/sdk/eventhub/azure-eventhubs/azure/eventhub/client.py b/sdk/eventhub/azure-eventhubs/azure/eventhub/client.py index 872ba98f48e0..deb07ce8dba7 100644 --- a/sdk/eventhub/azure-eventhubs/azure/eventhub/client.py +++ b/sdk/eventhub/azure-eventhubs/azure/eventhub/client.py @@ -7,12 +7,7 @@ import logging import datetime import functools -try: - from urlparse import urlparse # type: ignore - from urllib import unquote_plus, urlencode, quote_plus # type: ignore -except ImportError: - from urllib.parse import urlparse, unquote_plus, urlencode, quote_plus -from typing import Any, List, Dict +from typing import Any, List, Dict, Union, TYPE_CHECKING import uamqp # type: ignore from uamqp import Message # type: ignore @@ -27,6 +22,8 @@ from ._connection_manager import get_connection_manager from .error import _handle_exception +if TYPE_CHECKING: + from azure.core.credentials import TokenCredential # type: ignore log = logging.getLogger(__name__) @@ -47,7 +44,8 @@ class EventHubClient(EventHubClientAbstract): """ def __init__(self, host, event_hub_path, credential, **kwargs): - super(EventHubClient, self).__init__(host, event_hub_path, credential, **kwargs) + # type:(str, str, Union[EventHubSharedKeyCredential, EventHubSASTokenCredential, TokenCredential], ...) 
-> None + super(EventHubClient, self).__init__(host=host, event_hub_path=event_hub_path, credential=credential, **kwargs) self._conn_manager = get_connection_manager(**kwargs) def __enter__(self): @@ -71,7 +69,7 @@ def _create_auth(self, username=None, password=None): auth_timeout = self.config.auth_timeout # TODO: the following code can be refactored to create auth from classes directly instead of using if-else - if isinstance(self.credential, EventHubSharedKeyCredential): + if isinstance(self.credential, EventHubSharedKeyCredential): # pylint:disable=no-else-return username = username or self._auth_config['username'] password = password or self._auth_config['password'] if "@sas.root" in username: @@ -114,7 +112,7 @@ def _management_request(self, mgmt_msg, op_type): mgmt_auth = self._create_auth() mgmt_client = uamqp.AMQPClient(self.mgmt_target) try: - conn = self._conn_manager.get_connection(self.host, mgmt_auth) + conn = self._conn_manager.get_connection(self.host, mgmt_auth) #pylint:disable=assignment-from-none mgmt_client.open(connection=conn) response = mgmt_client.mgmt_request( mgmt_msg, @@ -123,7 +121,7 @@ def _management_request(self, mgmt_msg, op_type): status_code_field=b'status-code', description_fields=b'status-description') return response - except Exception as exception: + except Exception as exception: # pylint: disable=broad-except self._handle_exception(exception, retry_count, max_retries) retry_count += 1 finally: @@ -277,4 +275,5 @@ def create_producer(self, partition_id=None, operation=None, send_timeout=None): return handler def close(self): + # type:() -> None self._conn_manager.close_connection() diff --git a/sdk/eventhub/azure-eventhubs/azure/eventhub/client_abstract.py b/sdk/eventhub/azure-eventhubs/azure/eventhub/client_abstract.py index d861b385336c..068bd26810a8 100644 --- a/sdk/eventhub/azure-eventhubs/azure/eventhub/client_abstract.py +++ b/sdk/eventhub/azure-eventhubs/azure/eventhub/client_abstract.py @@ -10,24 +10,19 @@ import time 
import functools from abc import abstractmethod -from typing import Dict +from typing import Dict, Union, TYPE_CHECKING +from azure.eventhub import __version__ +from azure.eventhub.configuration import _Configuration +from .common import EventHubSharedKeyCredential, EventHubSASTokenCredential, _Address + try: from urlparse import urlparse # type: ignore - from urllib import unquote_plus, urlencode, quote_plus # type: ignore + from urllib import urlencode, quote_plus # type: ignore except ImportError: - from urllib.parse import urlparse, unquote_plus, urlencode, quote_plus + from urllib.parse import urlparse, urlencode, quote_plus -try: - from typing import TYPE_CHECKING -except ImportError: - TYPE_CHECKING = False if TYPE_CHECKING: from azure.core.credentials import TokenCredential # type: ignore - from typing import Union, Any - -from azure.eventhub import __version__ -from azure.eventhub.configuration import _Configuration -from .common import EventHubSharedKeyCredential, EventHubSASTokenCredential, _Address log = logging.getLogger(__name__) MAX_USER_AGENT_LENGTH = 512 @@ -88,14 +83,14 @@ def _build_uri(address, entity): return address -class EventHubClientAbstract(object): +class EventHubClientAbstract(object): # pylint:disable=too-many-instance-attributes """ The EventHubClientAbstract class defines a high level interface for sending events to and receiving events from the Azure Event Hubs service. """ def __init__(self, host, event_hub_path, credential, **kwargs): - # type:(str, str, Union[EventHubSharedKeyCredential, EventHubSASTokenCredential, TokenCredential], Any) -> None + # type:(str, str, Union[EventHubSharedKeyCredential, EventHubSASTokenCredential, TokenCredential], ...) -> None """ Constructs a new EventHubClient. 
@@ -269,7 +264,7 @@ def from_connection_string(cls, conn_str, **kwargs): """ event_hub_path = kwargs.pop("event_hub_path", None) is_iot_conn_str = conn_str.lstrip().lower().startswith("hostname") - if not is_iot_conn_str: + if not is_iot_conn_str: # pylint:disable=no-else-return address, policy, key, entity = _parse_conn_str(conn_str) entity = event_hub_path or entity left_slash_pos = address.find("//") diff --git a/sdk/eventhub/azure-eventhubs/azure/eventhub/common.py b/sdk/eventhub/azure-eventhubs/azure/eventhub/common.py index cb8e1a7103b1..56ea25863be3 100644 --- a/sdk/eventhub/azure-eventhubs/azure/eventhub/common.py +++ b/sdk/eventhub/azure-eventhubs/azure/eventhub/common.py @@ -7,12 +7,12 @@ import datetime import calendar import json -import six import logging +import six -from azure.eventhub.error import EventDataError from uamqp import BatchMessage, Message, types, constants # type: ignore from uamqp.message import MessageHeader, MessageProperties # type: ignore +from azure.eventhub.error import EventDataError log = logging.getLogger(__name__) @@ -119,9 +119,9 @@ def _set_partition_key(self, value): def _from_message(message): event_data = EventData(body='') event_data.message = message - event_data._msg_properties = message.properties - event_data._annotations = message.annotations - event_data._app_properties = message.application_properties + event_data._msg_properties = message.properties # pylint:disable=protected-access + event_data._annotations = message.annotations # pylint:disable=protected-access + event_data._app_properties = message.application_properties # pylint:disable=protected-access return event_data @property @@ -281,7 +281,7 @@ def size(self): @staticmethod def _from_batch(batch_data, partition_key=None): batch_data_instance = EventDataBatch(partition_key=partition_key) - batch_data_instance.message._body_gen = batch_data + batch_data_instance.message._body_gen = batch_data # pylint:disable=protected-access return batch_data_instance 
def _set_partition_key(self, value): @@ -308,10 +308,10 @@ def try_add(self, event_data): raise TypeError('event_data should be type of EventData') if self._partition_key: - if event_data.partition_key and not (event_data.partition_key == self._partition_key): + if event_data.partition_key and not event_data.partition_key == self._partition_key: raise EventDataError('The partition_key of event_data does not match the one of the EventDataBatch') if not event_data.partition_key: - event_data._set_partition_key(self._partition_key) + event_data._set_partition_key(self._partition_key) # pylint:disable=protected-access event_data_size = event_data.message.get_message_encoded_size() @@ -368,7 +368,7 @@ def _selector(self): :rtype: bytes """ operator = ">=" if self.inclusive else ">" - if isinstance(self.value, datetime.datetime): + if isinstance(self.value, datetime.datetime): # pylint:disable=no-else-return timestamp = (calendar.timegm(self.value.utctimetuple()) * 1000) + (self.value.microsecond/1000) return ("amqp.annotation.x-opt-enqueued-time {} '{}'".format(operator, int(timestamp))).encode('utf-8') elif isinstance(self.value, six.integer_types): @@ -391,7 +391,7 @@ def __init__(self, token): self.token = token def get_sas_token(self): - if callable(self.token): + if callable(self.token): # pylint:disable=no-else-return return self.token() else: return self.token diff --git a/sdk/eventhub/azure-eventhubs/azure/eventhub/configuration.py b/sdk/eventhub/azure-eventhubs/azure/eventhub/configuration.py index 5be9df85c732..e7ea5b43df46 100644 --- a/sdk/eventhub/azure-eventhubs/azure/eventhub/configuration.py +++ b/sdk/eventhub/azure-eventhubs/azure/eventhub/configuration.py @@ -5,7 +5,7 @@ from uamqp.constants import TransportType # type: ignore -class _Configuration(object): +class _Configuration(object): # pylint:disable=too-many-instance-attributes def __init__(self, **kwargs): self.user_agent = kwargs.get("user_agent") self.retry_total = kwargs.get('retry_total', 3) 
diff --git a/sdk/eventhub/azure-eventhubs/azure/eventhub/consumer.py b/sdk/eventhub/azure-eventhubs/azure/eventhub/consumer.py index 883840d7d720..44be38386b73 100644 --- a/sdk/eventhub/azure-eventhubs/azure/eventhub/consumer.py +++ b/sdk/eventhub/azure-eventhubs/azure/eventhub/consumer.py @@ -20,7 +20,7 @@ log = logging.getLogger(__name__) -class EventHubConsumer(ConsumerProducerMixin): +class EventHubConsumer(ConsumerProducerMixin): # pylint:disable=too-many-instance-attributes """ A consumer responsible for reading EventData from a specific Event Hub partition and as a member of a specific consumer group. @@ -96,21 +96,21 @@ def __next__(self): if not self.messages_iter: self.messages_iter = self._handler.receive_messages_iter() message = next(self.messages_iter) - event_data = EventData._from_message(message) + event_data = EventData._from_message(message) # pylint:disable=protected-access self.offset = EventPosition(event_data.offset, inclusive=False) retry_count = 0 return event_data - except Exception as exception: - self._handle_exception(exception, retry_count, max_retries) + except Exception as exception: # pylint:disable=broad-except + self._handle_exception(exception, retry_count, max_retries, timeout_time=None) retry_count += 1 def _create_handler(self): alt_creds = { - "username": self.client._auth_config.get("iot_username"), - "password": self.client._auth_config.get("iot_password")} + "username": self.client._auth_config.get("iot_username"), # pylint:disable=protected-access + "password": self.client._auth_config.get("iot_password")} # pylint:disable=protected-access source = Source(self.source) if self.offset is not None: - source.set_filter(self.offset._selector()) + source.set_filter(self.offset._selector()) # pylint:disable=protected-access self._handler = ReceiveClient( source, auth=self.client.get_auth(**alt_creds), @@ -121,8 +121,8 @@ def _create_handler(self): error_policy=self.retry_policy, keep_alive_interval=self.keep_alive, 
client_name=self.name, - properties=self.client._create_properties( - self.client.config.user_agent)) # pylint: disable=protected-access + properties=self.client._create_properties( # pylint:disable=protected-access + self.client.config.user_agent)) self.messages_iter = None def _redirect(self, redirect): @@ -158,7 +158,7 @@ def _receive(self, timeout_time=None, max_batch_size=None, **kwargs): max_batch_size=max_batch_size - (len(data_batch) if data_batch else 0), timeout=remaining_time_ms) for message in message_batch: - event_data = EventData._from_message(message) + event_data = EventData._from_message(message) # pylint:disable=protected-access self.offset = EventPosition(event_data.offset) data_batch.append(event_data) return data_batch diff --git a/sdk/eventhub/azure-eventhubs/azure/eventhub/error.py b/sdk/eventhub/azure-eventhubs/azure/eventhub/error.py index f48be4e557ea..72b11f5478ad 100644 --- a/sdk/eventhub/azure-eventhubs/azure/eventhub/error.py +++ b/sdk/eventhub/azure-eventhubs/azure/eventhub/error.py @@ -2,11 +2,11 @@ # Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. See License.txt in the project root for license information. # -------------------------------------------------------------------------------------------- -import six import time import logging +import six -from uamqp import constants, errors, compat # type: ignore +from uamqp import errors, compat # type: ignore _NO_RETRY_ERRORS = ( @@ -102,14 +102,12 @@ class ConnectionLostError(EventHubError): """Connection to event hub is lost. SDK will retry. So this shouldn't happen. 
""" - pass class ConnectError(EventHubError): """Fail to connect to event hubs """ - pass class AuthenticationError(ConnectError): @@ -117,28 +115,24 @@ class AuthenticationError(ConnectError): """ - pass class EventDataError(EventHubError): """Problematic event data so the send will fail at client side """ - pass class EventDataSendError(EventHubError): """Service returns error while an event data is being sent """ - pass class OperationTimeoutError(EventHubError): """Operation times out """ - pass def _create_eventhub_exception(exception): @@ -163,18 +157,18 @@ def _create_eventhub_exception(exception): return error -def _handle_exception(exception, retry_count, max_retries, closable, timeout_time=None): +def _handle_exception(exception, retry_count, max_retries, closable, timeout_time=None): # pylint:disable=too-many-branches, too-many-statements try: name = closable.name except AttributeError: name = closable.container_id - if isinstance(exception, KeyboardInterrupt): + if isinstance(exception, KeyboardInterrupt): # pylint:disable=no-else-raise log.info("%r stops due to keyboard interrupt", name) closable.close() - raise + raise exception elif isinstance(exception, EventHubError): closable.close() - raise + raise exception elif isinstance(exception, ( errors.MessageAccepted, errors.MessageAlreadySettled, @@ -197,29 +191,29 @@ def _handle_exception(exception, retry_count, max_retries, closable, timeout_tim else: if isinstance(exception, errors.AuthenticationException): if hasattr(closable, "_close_connection"): - closable._close_connection() + closable._close_connection() # pylint:disable=protected-access elif isinstance(exception, errors.LinkRedirect): log.info("%r link redirect received. 
Redirecting...", name) redirect = exception if hasattr(closable, "_redirect"): - closable._redirect(redirect) + closable._redirect(redirect) # pylint:disable=protected-access elif isinstance(exception, errors.LinkDetach): if hasattr(closable, "_close_handler"): - closable._close_handler() + closable._close_handler() # pylint:disable=protected-access elif isinstance(exception, errors.ConnectionClose): if hasattr(closable, "_close_connection"): - closable._close_connection() + closable._close_connection() # pylint:disable=protected-access elif isinstance(exception, errors.MessageHandlerError): if hasattr(closable, "_close_handler"): - closable._close_handler() + closable._close_handler() # pylint:disable=protected-access elif isinstance(exception, errors.AMQPConnectionError): if hasattr(closable, "_close_connection"): - closable._close_connection() + closable._close_connection() # pylint:disable=protected-access elif isinstance(exception, compat.TimeoutException): pass # Timeout doesn't need to recreate link or connection to retry else: if hasattr(closable, "_close_connection"): - closable._close_connection() + closable._close_connection() # pylint:disable=protected-access # start processing retry delay try: backoff_factor = closable.client.config.backoff_factor @@ -228,7 +222,7 @@ def _handle_exception(exception, retry_count, max_retries, closable, timeout_tim backoff_factor = closable.config.backoff_factor backoff_max = closable.config.backoff_max backoff = backoff_factor * 2 ** retry_count - if backoff <= backoff_max and (timeout_time is None or time.time() + backoff <= timeout_time): + if backoff <= backoff_max and (timeout_time is None or time.time() + backoff <= timeout_time): #pylint:disable=no-else-return time.sleep(backoff) log.info("%r has an exception (%r). 
Retrying...", format(name), exception) return _create_eventhub_exception(exception) diff --git a/sdk/eventhub/azure-eventhubs/azure/eventhub/producer.py b/sdk/eventhub/azure-eventhubs/azure/eventhub/producer.py index 96c647909043..1a1217957686 100644 --- a/sdk/eventhub/azure-eventhubs/azure/eventhub/producer.py +++ b/sdk/eventhub/azure-eventhubs/azure/eventhub/producer.py @@ -28,11 +28,11 @@ def _error(outcome, condition): def _set_partition_key(event_datas, partition_key): ed_iter = iter(event_datas) for ed in ed_iter: - ed._set_partition_key(partition_key) + ed._set_partition_key(partition_key) # pylint:disable=protected-access yield ed -class EventHubProducer(ConsumerProducerMixin): +class EventHubProducer(ConsumerProducerMixin): # pylint:disable=too-many-instance-attributes """ A producer responsible for transmitting EventData to a specific Event Hub, grouped together in batches. Depending on the options specified at creation, the producer may @@ -111,9 +111,9 @@ def _open(self, timeout_time=None, **kwargs): context will be used to create a new handler before opening it. """ - # pylint: disable=protected-access + if not self.running and self.redirected: - self.client._process_redirect_uri(self.redirected) + self.client._process_redirect_uri(self.redirected) # pylint: disable=protected-access self.target = self.redirected.address super(EventHubProducer, self)._open(timeout_time) @@ -140,7 +140,6 @@ def _send_event_data(self, timeout_time=None, last_exception=None): if self._outcome == constants.MessageSendResult.Timeout: self._condition = OperationTimeoutError("send operation timed out") _error(self._outcome, self._condition) - return @_retry_decorator def _send_event_data_with_retry(self, timeout_time=None, last_exception=None): @@ -229,7 +228,7 @@ def send(self, event_data, partition_key=None, timeout=None): wrapper_event_data = event_data else: if isinstance(event_data, EventDataBatch): # The partition_key in the param will be omitted. 
- if partition_key and not (partition_key == event_data._partition_key): # pylint: disable=protected-access + if partition_key and not partition_key == event_data._partition_key: # pylint: disable=protected-access raise EventDataError('The partition_key does not match the one of the EventDataBatch') wrapper_event_data = event_data # type:ignore else: @@ -240,7 +239,7 @@ def send(self, event_data, partition_key=None, timeout=None): self.unsent_events = [wrapper_event_data.message] self._send_event_data_with_retry(timeout=timeout) - def close(self, exception=None): + def close(self, exception=None): # pylint:disable=useless-super-delegation # type:(Exception) -> None """ Close down the handler. If the handler has already closed, From 247004a077f897fabc34c2e66fbc4ea15e96e0b3 Mon Sep 17 00:00:00 2001 From: yijxie Date: Thu, 29 Aug 2019 08:29:02 -0700 Subject: [PATCH 07/51] Fix pylint --- .../azure/eventhub/aio/client_async.py | 1 + .../azure/eventhub/aio/producer_async.py | 12 ++++++------ 2 files changed, 7 insertions(+), 6 deletions(-) diff --git a/sdk/eventhub/azure-eventhubs/azure/eventhub/aio/client_async.py b/sdk/eventhub/azure-eventhubs/azure/eventhub/aio/client_async.py index 1c02466a9add..d9b1be918bf4 100644 --- a/sdk/eventhub/azure-eventhubs/azure/eventhub/aio/client_async.py +++ b/sdk/eventhub/azure-eventhubs/azure/eventhub/aio/client_async.py @@ -287,4 +287,5 @@ def create_producer( return handler async def close(self): + # type: () -> None await self._conn_manager.close_connection() diff --git a/sdk/eventhub/azure-eventhubs/azure/eventhub/aio/producer_async.py b/sdk/eventhub/azure-eventhubs/azure/eventhub/aio/producer_async.py index 1f4fde946cf2..1e819aa12d74 100644 --- a/sdk/eventhub/azure-eventhubs/azure/eventhub/aio/producer_async.py +++ b/sdk/eventhub/azure-eventhubs/azure/eventhub/aio/producer_async.py @@ -20,7 +20,7 @@ log = logging.getLogger(__name__) -class EventHubProducer(ConsumerProducerMixin): +class EventHubProducer(ConsumerProducerMixin): # 
pylint: disable=too-many-instance-attributes """ A producer responsible for transmitting EventData to a specific Event Hub, grouped together in batches. Depending on the options specified at creation, the producer may @@ -94,8 +94,8 @@ def _create_handler(self): keep_alive_interval=self.keep_alive, client_name=self.name, link_properties=self._link_properties, - properties=self.client._create_properties( - self.client.config.user_agent), # pylint: disable=protected-access + properties=self.client._create_properties( # pylint: disable=protected-access + self.client.config.user_agent), loop=self.loop) async def _open(self, timeout_time=None, **kwargs): @@ -106,7 +106,7 @@ async def _open(self, timeout_time=None, **kwargs): """ if not self.running and self.redirected: - self.client._process_redirect_uri(self.redirected) + self.client._process_redirect_uri(self.redirected) # pylint: disable=protected-access self.target = self.redirected.address await super(EventHubProducer, self)._open(timeout_time) @@ -222,7 +222,7 @@ async def send(self, event_data, *, partition_key=None, timeout=None): wrapper_event_data = event_data else: if isinstance(event_data, EventDataBatch): - if partition_key and not (partition_key == event_data._partition_key): # pylint: disable=protected-access + if partition_key and partition_key != event_data._partition_key: # pylint: disable=protected-access raise EventDataError('The partition_key does not match the one of the EventDataBatch') wrapper_event_data = event_data #type: ignore else: @@ -233,7 +233,7 @@ async def send(self, event_data, *, partition_key=None, timeout=None): self.unsent_events = [wrapper_event_data.message] await self._send_event_data_with_retry(timeout=timeout) - async def close(self, exception=None): + async def close(self, exception=None): # type: (Exception) -> None """ Close down the handler. 
If the handler has already closed, From 6ace6ce8747b936a0e16bf60deaec3974e52ee03 Mon Sep 17 00:00:00 2001 From: yijxie Date: Thu, 29 Aug 2019 09:05:22 -0700 Subject: [PATCH 08/51] Use properties EventData.partition_key --- sdk/eventhub/azure-eventhubs/tests/test_send.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/sdk/eventhub/azure-eventhubs/tests/test_send.py b/sdk/eventhub/azure-eventhubs/tests/test_send.py index 8499ff93b36d..249b04215c7a 100644 --- a/sdk/eventhub/azure-eventhubs/tests/test_send.py +++ b/sdk/eventhub/azure-eventhubs/tests/test_send.py @@ -33,7 +33,7 @@ def test_send_with_partition_key(connstr_receivers): received = partition.receive(timeout=5) for message in received: try: - existing = found_partition_keys[message._partition_key] + existing = found_partition_keys[message.partition_key] assert existing == index except KeyError: found_partition_keys[message._partition_key] = index From 008421d9968bba7c7d9c39baa4a2ff6782e766e2 Mon Sep 17 00:00:00 2001 From: yijxie Date: Thu, 22 Aug 2019 17:54:12 -0700 Subject: [PATCH 09/51] Small changes from code review --- .../azure-eventhubs/azure/eventhub/aio/client_async.py | 9 ++++----- sdk/eventhub/azure-eventhubs/azure/eventhub/client.py | 7 +++---- .../azure-eventhubs/azure/eventhub/client_abstract.py | 3 +-- 3 files changed, 8 insertions(+), 11 deletions(-) diff --git a/sdk/eventhub/azure-eventhubs/azure/eventhub/aio/client_async.py b/sdk/eventhub/azure-eventhubs/azure/eventhub/aio/client_async.py index 4ad5a99f538c..a9fdb32381ef 100644 --- a/sdk/eventhub/azure-eventhubs/azure/eventhub/aio/client_async.py +++ b/sdk/eventhub/azure-eventhubs/azure/eventhub/aio/client_async.py @@ -221,12 +221,11 @@ def create_consumer(self, consumer_group: str, partition_id: str, event_position :caption: Add an async consumer to the client for a particular consumer group and partition. 
""" - owner_level = kwargs.get("owner_level", None) - operation = kwargs.get("operation", None) - prefetch = kwargs.get("prefetch", None) - loop = kwargs.get("loop", None) + owner_level = kwargs.get("owner_level") + operation = kwargs.get("operation") + prefetch = kwargs.get("prefetch") or self.config.prefetch + loop = kwargs.get("loop") - prefetch = prefetch or self.config.prefetch path = self.address.path + operation if operation else self.address.path source_url = "amqps://{}{}/ConsumerGroups/{}/Partitions/{}".format( self.address.hostname, path, consumer_group, partition_id) diff --git a/sdk/eventhub/azure-eventhubs/azure/eventhub/client.py b/sdk/eventhub/azure-eventhubs/azure/eventhub/client.py index 1656ad27d074..4513cf900917 100644 --- a/sdk/eventhub/azure-eventhubs/azure/eventhub/client.py +++ b/sdk/eventhub/azure-eventhubs/azure/eventhub/client.py @@ -228,11 +228,10 @@ def create_consumer(self, consumer_group, partition_id, event_position, **kwargs :caption: Add a consumer to the client for a particular consumer group and partition. 
""" - owner_level = kwargs.get("owner_level", None) - operation = kwargs.get("operation", None) - prefetch = kwargs.get("prefetch", None) + owner_level = kwargs.get("owner_level") + operation = kwargs.get("operation") + prefetch = kwargs.get("prefetch") or self.config.prefetch - prefetch = prefetch or self.config.prefetch path = self.address.path + operation if operation else self.address.path source_url = "amqps://{}{}/ConsumerGroups/{}/Partitions/{}".format( self.address.hostname, path, consumer_group, partition_id) diff --git a/sdk/eventhub/azure-eventhubs/azure/eventhub/client_abstract.py b/sdk/eventhub/azure-eventhubs/azure/eventhub/client_abstract.py index dd62e3e76de1..c26969bb5094 100644 --- a/sdk/eventhub/azure-eventhubs/azure/eventhub/client_abstract.py +++ b/sdk/eventhub/azure-eventhubs/azure/eventhub/client_abstract.py @@ -266,7 +266,7 @@ def from_connection_string(cls, conn_str, **kwargs): :caption: Create an EventHubClient from a connection string. """ - event_hub_path = kwargs.get("event_hub_path", None) + event_hub_path = kwargs.pop("event_hub_path") is_iot_conn_str = conn_str.lstrip().lower().startswith("hostname") if not is_iot_conn_str: address, policy, key, entity = _parse_conn_str(conn_str) @@ -276,7 +276,6 @@ def from_connection_string(cls, conn_str, **kwargs): host = address[left_slash_pos + 2:] else: host = address - kwargs.pop("event_hub_path", None) return cls(host, entity, EventHubSharedKeyCredential(policy, key), **kwargs) else: return cls._from_iothub_connection_string(conn_str, **kwargs) From b8c027d67a030b0c315252fcc207d1c2fed0f941 Mon Sep 17 00:00:00 2001 From: yijxie Date: Mon, 26 Aug 2019 09:49:59 -0700 Subject: [PATCH 10/51] change EventData.msg_properties to private attribute --- .../azure-eventhubs/azure/eventhub/common.py | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/sdk/eventhub/azure-eventhubs/azure/eventhub/common.py b/sdk/eventhub/azure-eventhubs/azure/eventhub/common.py index 
9df6aa5d1c6a..ea609aa213bd 100644 --- a/sdk/eventhub/azure-eventhubs/azure/eventhub/common.py +++ b/sdk/eventhub/azure-eventhubs/azure/eventhub/common.py @@ -54,6 +54,7 @@ class EventData(object): PROP_SEQ_NUMBER = b"x-opt-sequence-number" PROP_OFFSET = b"x-opt-offset" PROP_PARTITION_KEY = b"x-opt-partition-key" + PROP_PARTITION_KEY_AMQP_SYMBOL = types.AMQPSymbol(PROP_PARTITION_KEY) PROP_TIMESTAMP = b"x-opt-enqueued-time" PROP_DEVICE_ID = b"iothub-connection-device-id" @@ -67,20 +68,19 @@ def __init__(self, body=None, to_device=None): :type to_device: str """ - self._partition_key = types.AMQPSymbol(EventData.PROP_PARTITION_KEY) self._annotations = {} self._app_properties = {} - self.msg_properties = MessageProperties() + self._msg_properties = MessageProperties() if to_device: - self.msg_properties.to = '/devices/{}/messages/devicebound'.format(to_device) + self._msg_properties.to = '/devices/{}/messages/devicebound'.format(to_device) if body and isinstance(body, list): - self.message = Message(body[0], properties=self.msg_properties) + self.message = Message(body[0], properties=self._msg_properties) for more in body[1:]: self.message._body.append(more) # pylint: disable=protected-access elif body is None: raise ValueError("EventData cannot be None.") else: - self.message = Message(body, properties=self.msg_properties) + self.message = Message(body, properties=self._msg_properties) def __str__(self): dic = { @@ -108,7 +108,7 @@ def _set_partition_key(self, value): :type value: str or bytes """ annotations = dict(self._annotations) - annotations[self._partition_key] = value + annotations[EventData.PROP_PARTITION_KEY_AMQP_SYMBOL] = value header = MessageHeader() header.durable = True self.message.annotations = annotations @@ -119,7 +119,7 @@ def _set_partition_key(self, value): def _from_message(message): event_data = EventData(body='') event_data.message = message - event_data.msg_properties = message.properties + event_data._msg_properties = message.properties 
event_data._annotations = message.annotations event_data._app_properties = message.application_properties return event_data @@ -175,7 +175,7 @@ def partition_key(self): :rtype: bytes """ try: - return self._annotations[self._partition_key] + return self._annotations[EventData.PROP_PARTITION_KEY_AMQP_SYMBOL] except KeyError: return self._annotations.get(EventData.PROP_PARTITION_KEY, None) From 2489dd3219f0ef2621ada8b91b640381968fc964 Mon Sep 17 00:00:00 2001 From: yijxie Date: Mon, 26 Aug 2019 19:15:07 -0700 Subject: [PATCH 11/51] remove abstract method --- .../azure-eventhubs/azure/eventhub/client_abstract.py | 10 +--------- 1 file changed, 1 insertion(+), 9 deletions(-) diff --git a/sdk/eventhub/azure-eventhubs/azure/eventhub/client_abstract.py b/sdk/eventhub/azure-eventhubs/azure/eventhub/client_abstract.py index c26969bb5094..030e85f39896 100644 --- a/sdk/eventhub/azure-eventhubs/azure/eventhub/client_abstract.py +++ b/sdk/eventhub/azure-eventhubs/azure/eventhub/client_abstract.py @@ -266,7 +266,7 @@ def from_connection_string(cls, conn_str, **kwargs): :caption: Create an EventHubClient from a connection string. 
""" - event_hub_path = kwargs.pop("event_hub_path") + event_hub_path = kwargs.pop("event_hub_path", None) is_iot_conn_str = conn_str.lstrip().lower().startswith("hostname") if not is_iot_conn_str: address, policy, key, entity = _parse_conn_str(conn_str) @@ -279,11 +279,3 @@ def from_connection_string(cls, conn_str, **kwargs): return cls(host, entity, EventHubSharedKeyCredential(policy, key), **kwargs) else: return cls._from_iothub_connection_string(conn_str, **kwargs) - - @abstractmethod - def create_consumer(self, consumer_group, partition_id, event_position, **kwargs): - pass - - @abstractmethod - def create_producer(self, partition_id=None, operation=None, send_timeout=None): - pass From 3a2d72fc297200e71fbceec8d8a7e475edd70d8b Mon Sep 17 00:00:00 2001 From: yijxie Date: Wed, 28 Aug 2019 14:10:48 -0700 Subject: [PATCH 12/51] code clean 1 --- sdk/eventhub/azure-eventhubs/azure/__init__.py | 2 +- .../azure/eventhub/_connection_manager.py | 2 +- .../azure/eventhub/_consumer_producer_mixin.py | 2 +- .../azure/eventhub/aio/_connection_manager_async.py | 4 ++-- .../eventhub/aio/_consumer_producer_mixin_async.py | 2 +- .../azure/eventhub/aio/producer_async.py | 2 +- sdk/eventhub/azure-eventhubs/azure/eventhub/client.py | 10 +++++----- .../azure-eventhubs/azure/eventhub/client_abstract.py | 5 +++-- sdk/eventhub/azure-eventhubs/azure/eventhub/common.py | 4 ++-- .../azure-eventhubs/azure/eventhub/configuration.py | 2 +- sdk/eventhub/azure-eventhubs/azure/eventhub/error.py | 2 +- .../azure-eventhubs/azure/eventhub/producer.py | 2 +- 12 files changed, 20 insertions(+), 19 deletions(-) diff --git a/sdk/eventhub/azure-eventhubs/azure/__init__.py b/sdk/eventhub/azure-eventhubs/azure/__init__.py index 66c5d46008f7..62351a0ab30b 100644 --- a/sdk/eventhub/azure-eventhubs/azure/__init__.py +++ b/sdk/eventhub/azure-eventhubs/azure/__init__.py @@ -2,4 +2,4 @@ # Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. 
See License.txt in the project root for license information. # -------------------------------------------------------------------------------------------- -__path__ = __import__('pkgutil').extend_path(__path__, __name__) +__path__ = __import__('pkgutil').extend_path(__path__, __name__) # type: ignore diff --git a/sdk/eventhub/azure-eventhubs/azure/eventhub/_connection_manager.py b/sdk/eventhub/azure-eventhubs/azure/eventhub/_connection_manager.py index 166703d698e7..505b198ff910 100644 --- a/sdk/eventhub/azure-eventhubs/azure/eventhub/_connection_manager.py +++ b/sdk/eventhub/azure-eventhubs/azure/eventhub/_connection_manager.py @@ -4,7 +4,7 @@ # -------------------------------------------------------------------------------------------- from threading import RLock -from uamqp import Connection, TransportType, c_uamqp +from uamqp import Connection, TransportType, c_uamqp # type: ignore class _SharedConnectionManager(object): diff --git a/sdk/eventhub/azure-eventhubs/azure/eventhub/_consumer_producer_mixin.py b/sdk/eventhub/azure-eventhubs/azure/eventhub/_consumer_producer_mixin.py index 8e3cae1e8a0b..0c29fc4b1ad9 100644 --- a/sdk/eventhub/azure-eventhubs/azure/eventhub/_consumer_producer_mixin.py +++ b/sdk/eventhub/azure-eventhubs/azure/eventhub/_consumer_producer_mixin.py @@ -119,7 +119,7 @@ def close(self, exception=None): """ self.running = False - if self.error: + if self.error: # type: ignore return if isinstance(exception, errors.LinkRedirect): self.redirected = exception diff --git a/sdk/eventhub/azure-eventhubs/azure/eventhub/aio/_connection_manager_async.py b/sdk/eventhub/azure-eventhubs/azure/eventhub/aio/_connection_manager_async.py index 618359192ffe..6b70f72cbe3e 100644 --- a/sdk/eventhub/azure-eventhubs/azure/eventhub/aio/_connection_manager_async.py +++ b/sdk/eventhub/azure-eventhubs/azure/eventhub/aio/_connection_manager_async.py @@ -4,8 +4,8 @@ # -------------------------------------------------------------------------------------------- from 
asyncio import Lock -from uamqp import TransportType, c_uamqp -from uamqp.async_ops import ConnectionAsync +from uamqp import TransportType, c_uamqp # type: ignore +from uamqp.async_ops import ConnectionAsync # type: ignore class _SharedConnectionManager(object): diff --git a/sdk/eventhub/azure-eventhubs/azure/eventhub/aio/_consumer_producer_mixin_async.py b/sdk/eventhub/azure-eventhubs/azure/eventhub/aio/_consumer_producer_mixin_async.py index 8ccdcdddcd47..0b94893848bf 100644 --- a/sdk/eventhub/azure-eventhubs/azure/eventhub/aio/_consumer_producer_mixin_async.py +++ b/sdk/eventhub/azure-eventhubs/azure/eventhub/aio/_consumer_producer_mixin_async.py @@ -120,7 +120,7 @@ async def close(self, exception=None): """ self.running = False - if self.error: + if self.error: #type: ignore return if isinstance(exception, errors.LinkRedirect): self.redirected = exception diff --git a/sdk/eventhub/azure-eventhubs/azure/eventhub/aio/producer_async.py b/sdk/eventhub/azure-eventhubs/azure/eventhub/aio/producer_async.py index c45cf9a6b283..7a911be27ab0 100644 --- a/sdk/eventhub/azure-eventhubs/azure/eventhub/aio/producer_async.py +++ b/sdk/eventhub/azure-eventhubs/azure/eventhub/aio/producer_async.py @@ -224,7 +224,7 @@ async def send(self, event_data, *, partition_key=None, timeout=None): if isinstance(event_data, EventDataBatch): if partition_key and not (partition_key == event_data._partition_key): # pylint: disable=protected-access raise EventDataError('The partition_key does not match the one of the EventDataBatch') - wrapper_event_data = event_data + wrapper_event_data = event_data #type: ignore else: if partition_key: event_data = _set_partition_key(event_data, partition_key) diff --git a/sdk/eventhub/azure-eventhubs/azure/eventhub/client.py b/sdk/eventhub/azure-eventhubs/azure/eventhub/client.py index 4513cf900917..c6d0ced931b2 100644 --- a/sdk/eventhub/azure-eventhubs/azure/eventhub/client.py +++ b/sdk/eventhub/azure-eventhubs/azure/eventhub/client.py @@ -9,15 +9,15 @@ 
import functools try: from urlparse import urlparse - from urllib import unquote_plus, urlencode, quote_plus + from urllib import unquote_plus, urlencode, quote_plus # type: ignore except ImportError: from urllib.parse import urlparse, unquote_plus, urlencode, quote_plus from typing import Any, List, Dict -import uamqp -from uamqp import Message -from uamqp import authentication -from uamqp import constants +import uamqp # type: ignore +from uamqp import Message # type: ignore +from uamqp import authentication # type: ignore +from uamqp import constants # type: ignore from azure.eventhub.producer import EventHubProducer from azure.eventhub.consumer import EventHubConsumer diff --git a/sdk/eventhub/azure-eventhubs/azure/eventhub/client_abstract.py b/sdk/eventhub/azure-eventhubs/azure/eventhub/client_abstract.py index 030e85f39896..32b9993091da 100644 --- a/sdk/eventhub/azure-eventhubs/azure/eventhub/client_abstract.py +++ b/sdk/eventhub/azure-eventhubs/azure/eventhub/client_abstract.py @@ -10,9 +10,10 @@ import time import functools from abc import abstractmethod +from typing import Dict try: from urlparse import urlparse - from urllib import unquote_plus, urlencode, quote_plus + from urllib import unquote_plus, urlencode, quote_plus # type: ignore except ImportError: from urllib.parse import urlparse, unquote_plus, urlencode, quote_plus @@ -140,7 +141,7 @@ def __init__(self, host, event_hub_path, credential, **kwargs): self.address = _Address() self.address.hostname = host self.address.path = "/" + event_hub_path if event_hub_path else "" - self._auth_config = {} + self._auth_config = {} # type:Dict[str,str] self.credential = credential if isinstance(credential, EventHubSharedKeyCredential): self.username = credential.policy diff --git a/sdk/eventhub/azure-eventhubs/azure/eventhub/common.py b/sdk/eventhub/azure-eventhubs/azure/eventhub/common.py index ea609aa213bd..cb8e1a7103b1 100644 --- a/sdk/eventhub/azure-eventhubs/azure/eventhub/common.py +++ 
b/sdk/eventhub/azure-eventhubs/azure/eventhub/common.py @@ -11,8 +11,8 @@ import logging from azure.eventhub.error import EventDataError -from uamqp import BatchMessage, Message, types, constants -from uamqp.message import MessageHeader, MessageProperties +from uamqp import BatchMessage, Message, types, constants # type: ignore +from uamqp.message import MessageHeader, MessageProperties # type: ignore log = logging.getLogger(__name__) diff --git a/sdk/eventhub/azure-eventhubs/azure/eventhub/configuration.py b/sdk/eventhub/azure-eventhubs/azure/eventhub/configuration.py index 1de21811f2d3..5be9df85c732 100644 --- a/sdk/eventhub/azure-eventhubs/azure/eventhub/configuration.py +++ b/sdk/eventhub/azure-eventhubs/azure/eventhub/configuration.py @@ -2,7 +2,7 @@ # Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. See License.txt in the project root for license information. # -------------------------------------------------------------------------------------------- -from uamqp.constants import TransportType +from uamqp.constants import TransportType # type: ignore class _Configuration(object): diff --git a/sdk/eventhub/azure-eventhubs/azure/eventhub/error.py b/sdk/eventhub/azure-eventhubs/azure/eventhub/error.py index cbe6a8a04946..f48be4e557ea 100644 --- a/sdk/eventhub/azure-eventhubs/azure/eventhub/error.py +++ b/sdk/eventhub/azure-eventhubs/azure/eventhub/error.py @@ -6,7 +6,7 @@ import time import logging -from uamqp import constants, errors, compat +from uamqp import constants, errors, compat # type: ignore _NO_RETRY_ERRORS = ( diff --git a/sdk/eventhub/azure-eventhubs/azure/eventhub/producer.py b/sdk/eventhub/azure-eventhubs/azure/eventhub/producer.py index 8655b16bd451..8a7ed5349e90 100644 --- a/sdk/eventhub/azure-eventhubs/azure/eventhub/producer.py +++ b/sdk/eventhub/azure-eventhubs/azure/eventhub/producer.py @@ -231,7 +231,7 @@ def send(self, event_data, partition_key=None, timeout=None): if isinstance(event_data, 
EventDataBatch): # The partition_key in the param will be omitted. if partition_key and not (partition_key == event_data._partition_key): # pylint: disable=protected-access raise EventDataError('The partition_key does not match the one of the EventDataBatch') - wrapper_event_data = event_data + wrapper_event_data = event_data # type:ignore else: if partition_key: event_data = _set_partition_key(event_data, partition_key) From 97357562aa84c383bd9db700724f657ba4c94e04 Mon Sep 17 00:00:00 2001 From: yijxie Date: Wed, 28 Aug 2019 14:20:37 -0700 Subject: [PATCH 13/51] code clean 2 --- sdk/eventhub/azure-eventhubs/azure/eventhub/__init__.py | 2 +- .../azure/eventhub/_consumer_producer_mixin.py | 2 +- .../azure/eventhub/aio/_consumer_producer_mixin_async.py | 2 +- .../azure-eventhubs/azure/eventhub/aio/client_async.py | 4 ++-- .../azure-eventhubs/azure/eventhub/aio/consumer_async.py | 4 ++-- .../azure-eventhubs/azure/eventhub/aio/error_async.py | 2 +- .../azure-eventhubs/azure/eventhub/aio/producer_async.py | 4 ++-- sdk/eventhub/azure-eventhubs/azure/eventhub/client.py | 2 +- .../azure-eventhubs/azure/eventhub/client_abstract.py | 4 ++-- sdk/eventhub/azure-eventhubs/azure/eventhub/consumer.py | 4 ++-- sdk/eventhub/azure-eventhubs/azure/eventhub/producer.py | 4 ++-- 11 files changed, 17 insertions(+), 17 deletions(-) diff --git a/sdk/eventhub/azure-eventhubs/azure/eventhub/__init__.py b/sdk/eventhub/azure-eventhubs/azure/eventhub/__init__.py index 0ccd9d5ec73c..74627c8bf854 100644 --- a/sdk/eventhub/azure-eventhubs/azure/eventhub/__init__.py +++ b/sdk/eventhub/azure-eventhubs/azure/eventhub/__init__.py @@ -11,7 +11,7 @@ from azure.eventhub.client import EventHubClient from azure.eventhub.producer import EventHubProducer from azure.eventhub.consumer import EventHubConsumer -from uamqp import constants +from uamqp import constants # type: ignore from .common import EventHubSharedKeyCredential, EventHubSASTokenCredential TransportType = constants.TransportType diff --git 
a/sdk/eventhub/azure-eventhubs/azure/eventhub/_consumer_producer_mixin.py b/sdk/eventhub/azure-eventhubs/azure/eventhub/_consumer_producer_mixin.py index 0c29fc4b1ad9..108d3772eeee 100644 --- a/sdk/eventhub/azure-eventhubs/azure/eventhub/_consumer_producer_mixin.py +++ b/sdk/eventhub/azure-eventhubs/azure/eventhub/_consumer_producer_mixin.py @@ -7,7 +7,7 @@ import logging import time -from uamqp import errors, constants, compat +from uamqp import errors, constants, compat # type: ignore from azure.eventhub.error import EventHubError, _handle_exception log = logging.getLogger(__name__) diff --git a/sdk/eventhub/azure-eventhubs/azure/eventhub/aio/_consumer_producer_mixin_async.py b/sdk/eventhub/azure-eventhubs/azure/eventhub/aio/_consumer_producer_mixin_async.py index 0b94893848bf..49a4e071f921 100644 --- a/sdk/eventhub/azure-eventhubs/azure/eventhub/aio/_consumer_producer_mixin_async.py +++ b/sdk/eventhub/azure-eventhubs/azure/eventhub/aio/_consumer_producer_mixin_async.py @@ -6,7 +6,7 @@ import logging import time -from uamqp import errors, constants, compat +from uamqp import errors, constants, compat # type: ignore from azure.eventhub.error import EventHubError, ConnectError from ..aio.error_async import _handle_exception diff --git a/sdk/eventhub/azure-eventhubs/azure/eventhub/aio/client_async.py b/sdk/eventhub/azure-eventhubs/azure/eventhub/aio/client_async.py index a9fdb32381ef..fa03a3d5324d 100644 --- a/sdk/eventhub/azure-eventhubs/azure/eventhub/aio/client_async.py +++ b/sdk/eventhub/azure-eventhubs/azure/eventhub/aio/client_async.py @@ -8,11 +8,11 @@ import asyncio from typing import Any, List, Dict -from uamqp import authentication, constants +from uamqp import authentication, constants # type: ignore from uamqp import ( Message, AMQPClientAsync, -) +) # type: ignore from azure.eventhub.common import parse_sas_token, EventPosition, EventHubSharedKeyCredential, EventHubSASTokenCredential from ..client_abstract import EventHubClientAbstract diff --git 
a/sdk/eventhub/azure-eventhubs/azure/eventhub/aio/consumer_async.py b/sdk/eventhub/azure-eventhubs/azure/eventhub/aio/consumer_async.py index 404fc23312f0..2814b0c54b4a 100644 --- a/sdk/eventhub/azure-eventhubs/azure/eventhub/aio/consumer_async.py +++ b/sdk/eventhub/azure-eventhubs/azure/eventhub/aio/consumer_async.py @@ -8,8 +8,8 @@ from typing import List import time -from uamqp import errors, types -from uamqp import ReceiveClientAsync, Source +from uamqp import errors, types # type: ignore +from uamqp import ReceiveClientAsync, Source # type: ignore from azure.eventhub import EventData, EventPosition from azure.eventhub.error import EventHubError, ConnectError, _error_handler diff --git a/sdk/eventhub/azure-eventhubs/azure/eventhub/aio/error_async.py b/sdk/eventhub/azure-eventhubs/azure/eventhub/aio/error_async.py index b44f8cb54a33..51d5ac8ad0f1 100644 --- a/sdk/eventhub/azure-eventhubs/azure/eventhub/aio/error_async.py +++ b/sdk/eventhub/azure-eventhubs/azure/eventhub/aio/error_async.py @@ -2,7 +2,7 @@ import time import logging -from uamqp import errors, compat +from uamqp import errors, compat # type: ignore from ..error import EventHubError, EventDataSendError, \ EventDataError, ConnectError, ConnectionLostError, AuthenticationError diff --git a/sdk/eventhub/azure-eventhubs/azure/eventhub/aio/producer_async.py b/sdk/eventhub/azure-eventhubs/azure/eventhub/aio/producer_async.py index 7a911be27ab0..96be7e90ed15 100644 --- a/sdk/eventhub/azure-eventhubs/azure/eventhub/aio/producer_async.py +++ b/sdk/eventhub/azure-eventhubs/azure/eventhub/aio/producer_async.py @@ -8,8 +8,8 @@ from typing import Iterable, Union import time -from uamqp import types, constants, errors -from uamqp import SendClientAsync +from uamqp import types, constants, errors # type: ignore +from uamqp import SendClientAsync # type: ignore from azure.eventhub.common import EventData, EventDataBatch from azure.eventhub.error import _error_handler, OperationTimeoutError, EventDataError diff 
--git a/sdk/eventhub/azure-eventhubs/azure/eventhub/client.py b/sdk/eventhub/azure-eventhubs/azure/eventhub/client.py index c6d0ced931b2..872ba98f48e0 100644 --- a/sdk/eventhub/azure-eventhubs/azure/eventhub/client.py +++ b/sdk/eventhub/azure-eventhubs/azure/eventhub/client.py @@ -8,7 +8,7 @@ import datetime import functools try: - from urlparse import urlparse + from urlparse import urlparse # type: ignore from urllib import unquote_plus, urlencode, quote_plus # type: ignore except ImportError: from urllib.parse import urlparse, unquote_plus, urlencode, quote_plus diff --git a/sdk/eventhub/azure-eventhubs/azure/eventhub/client_abstract.py b/sdk/eventhub/azure-eventhubs/azure/eventhub/client_abstract.py index 32b9993091da..d861b385336c 100644 --- a/sdk/eventhub/azure-eventhubs/azure/eventhub/client_abstract.py +++ b/sdk/eventhub/azure-eventhubs/azure/eventhub/client_abstract.py @@ -12,7 +12,7 @@ from abc import abstractmethod from typing import Dict try: - from urlparse import urlparse + from urlparse import urlparse # type: ignore from urllib import unquote_plus, urlencode, quote_plus # type: ignore except ImportError: from urllib.parse import urlparse, unquote_plus, urlencode, quote_plus @@ -22,7 +22,7 @@ except ImportError: TYPE_CHECKING = False if TYPE_CHECKING: - from azure.core.credentials import TokenCredential + from azure.core.credentials import TokenCredential # type: ignore from typing import Union, Any from azure.eventhub import __version__ diff --git a/sdk/eventhub/azure-eventhubs/azure/eventhub/consumer.py b/sdk/eventhub/azure-eventhubs/azure/eventhub/consumer.py index 1f2b6db17728..883840d7d720 100644 --- a/sdk/eventhub/azure-eventhubs/azure/eventhub/consumer.py +++ b/sdk/eventhub/azure-eventhubs/azure/eventhub/consumer.py @@ -9,8 +9,8 @@ import time from typing import List -from uamqp import types, errors -from uamqp import ReceiveClient, Source +from uamqp import types, errors # type: ignore +from uamqp import ReceiveClient, Source # type: ignore 
from azure.eventhub.common import EventData, EventPosition from azure.eventhub.error import _error_handler diff --git a/sdk/eventhub/azure-eventhubs/azure/eventhub/producer.py b/sdk/eventhub/azure-eventhubs/azure/eventhub/producer.py index 8a7ed5349e90..96c647909043 100644 --- a/sdk/eventhub/azure-eventhubs/azure/eventhub/producer.py +++ b/sdk/eventhub/azure-eventhubs/azure/eventhub/producer.py @@ -9,8 +9,8 @@ import time from typing import Iterable, Union -from uamqp import types, constants, errors -from uamqp import SendClient +from uamqp import types, constants, errors # type: ignore +from uamqp import SendClient # type: ignore from azure.eventhub.common import EventData, EventDataBatch from azure.eventhub.error import _error_handler, OperationTimeoutError, EventDataError From 288617e8899342ec564547c563db5827fe443fce Mon Sep 17 00:00:00 2001 From: yijxie Date: Wed, 28 Aug 2019 17:35:35 -0700 Subject: [PATCH 14/51] Fix pylint --- .../azure/eventhub/__init__.py | 3 +- .../azure/eventhub/_connection_manager.py | 14 ++++---- .../eventhub/_consumer_producer_mixin.py | 6 ++-- .../eventhub/aio/_connection_manager_async.py | 12 +++---- .../aio/_consumer_producer_mixin_async.py | 10 +++--- .../azure/eventhub/aio/client_async.py | 31 ++++++++++++----- .../azure/eventhub/aio/consumer_async.py | 20 +++++------ .../azure/eventhub/aio/error_async.py | 32 +++++++++-------- .../azure/eventhub/aio/producer_async.py | 4 +-- .../azure-eventhubs/azure/eventhub/client.py | 19 +++++------ .../azure/eventhub/client_abstract.py | 25 ++++++-------- .../azure-eventhubs/azure/eventhub/common.py | 20 +++++------ .../azure/eventhub/configuration.py | 2 +- .../azure/eventhub/consumer.py | 20 +++++------ .../azure-eventhubs/azure/eventhub/error.py | 34 ++++++++----------- .../azure/eventhub/producer.py | 13 ++++--- 16 files changed, 136 insertions(+), 129 deletions(-) diff --git a/sdk/eventhub/azure-eventhubs/azure/eventhub/__init__.py b/sdk/eventhub/azure-eventhubs/azure/eventhub/__init__.py 
index 74627c8bf854..040d00c947d8 100644 --- a/sdk/eventhub/azure-eventhubs/azure/eventhub/__init__.py +++ b/sdk/eventhub/azure-eventhubs/azure/eventhub/__init__.py @@ -4,14 +4,13 @@ # -------------------------------------------------------------------------------------------- __version__ = "5.0.0b2" - +from uamqp import constants # type: ignore from azure.eventhub.common import EventData, EventDataBatch, EventPosition from azure.eventhub.error import EventHubError, EventDataError, ConnectError, \ AuthenticationError, EventDataSendError, ConnectionLostError from azure.eventhub.client import EventHubClient from azure.eventhub.producer import EventHubProducer from azure.eventhub.consumer import EventHubConsumer -from uamqp import constants # type: ignore from .common import EventHubSharedKeyCredential, EventHubSASTokenCredential TransportType = constants.TransportType diff --git a/sdk/eventhub/azure-eventhubs/azure/eventhub/_connection_manager.py b/sdk/eventhub/azure-eventhubs/azure/eventhub/_connection_manager.py index 505b198ff910..77c12a376f97 100644 --- a/sdk/eventhub/azure-eventhubs/azure/eventhub/_connection_manager.py +++ b/sdk/eventhub/azure-eventhubs/azure/eventhub/_connection_manager.py @@ -7,7 +7,7 @@ from uamqp import Connection, TransportType, c_uamqp # type: ignore -class _SharedConnectionManager(object): +class _SharedConnectionManager(object): #pylint:disable=too-many-instance-attributes def __init__(self, **kwargs): self._lock = RLock() self._conn = None # type: Connection @@ -50,11 +50,11 @@ def close_connection(self): def reset_connection_if_broken(self): with self._lock: - if self._conn and self._conn._state in ( - c_uamqp.ConnectionState.CLOSE_RCVD, - c_uamqp.ConnectionState.CLOSE_SENT, - c_uamqp.ConnectionState.DISCARDING, - c_uamqp.ConnectionState.END, + if self._conn and self._conn._state in ( # pylint:disable=protected-access + c_uamqp.ConnectionState.CLOSE_RCVD, # pylint:disable=c-extension-no-member + c_uamqp.ConnectionState.CLOSE_SENT, # 
pylint:disable=c-extension-no-member + c_uamqp.ConnectionState.DISCARDING, # pylint:disable=c-extension-no-member + c_uamqp.ConnectionState.END, # pylint:disable=c-extension-no-member ): self._conn = None @@ -63,7 +63,7 @@ class _SeparateConnectionManager(object): def __init__(self, **kwargs): pass - def get_connection(self, host, auth): + def get_connection(self, host, auth): # pylint:disable=unused-argument, no-self-use return None def close_connection(self): diff --git a/sdk/eventhub/azure-eventhubs/azure/eventhub/_consumer_producer_mixin.py b/sdk/eventhub/azure-eventhubs/azure/eventhub/_consumer_producer_mixin.py index 108d3772eeee..837aaab26d78 100644 --- a/sdk/eventhub/azure-eventhubs/azure/eventhub/_consumer_producer_mixin.py +++ b/sdk/eventhub/azure-eventhubs/azure/eventhub/_consumer_producer_mixin.py @@ -25,8 +25,8 @@ def wrapped_func(self, *args, **kwargs): while True: try: return to_be_wrapped_func(self, timeout_time=timeout_time, last_exception=last_exception, **kwargs) - except Exception as exception: - last_exception = self._handle_exception(exception, retry_count, max_retries, timeout_time) + except Exception as exception: # pylint:disable=broad-except + last_exception = self._handle_exception(exception, retry_count, max_retries, timeout_time) # pylint:disable=protected-access retry_count += 1 return wrapped_func @@ -89,7 +89,7 @@ def _close_handler(self): def _close_connection(self): self._close_handler() - self.client._conn_manager.reset_connection_if_broken() + self.client._conn_manager.reset_connection_if_broken() # pylint: disable=protected-access def _handle_exception(self, exception, retry_count, max_retries, timeout_time): if not self.running and isinstance(exception, compat.TimeoutException): diff --git a/sdk/eventhub/azure-eventhubs/azure/eventhub/aio/_connection_manager_async.py b/sdk/eventhub/azure-eventhubs/azure/eventhub/aio/_connection_manager_async.py index 6b70f72cbe3e..2b38f2fd220e 100644 --- 
a/sdk/eventhub/azure-eventhubs/azure/eventhub/aio/_connection_manager_async.py +++ b/sdk/eventhub/azure-eventhubs/azure/eventhub/aio/_connection_manager_async.py @@ -8,7 +8,7 @@ from uamqp.async_ops import ConnectionAsync # type: ignore -class _SharedConnectionManager(object): +class _SharedConnectionManager(object): # pylint:disable=too-many-instance-attributes def __init__(self, **kwargs): self._lock = Lock() self._conn = None @@ -51,11 +51,11 @@ async def close_connection(self): async def reset_connection_if_broken(self): async with self._lock: - if self._conn and self._conn._state in ( - c_uamqp.ConnectionState.CLOSE_RCVD, - c_uamqp.ConnectionState.CLOSE_SENT, - c_uamqp.ConnectionState.DISCARDING, - c_uamqp.ConnectionState.END, + if self._conn and self._conn._state in ( # pylint:disable=protected-access + c_uamqp.ConnectionState.CLOSE_RCVD, # pylint:disable=c-extension-no-member + c_uamqp.ConnectionState.CLOSE_SENT, # pylint:disable=c-extension-no-member + c_uamqp.ConnectionState.DISCARDING, # pylint:disable=c-extension-no-member + c_uamqp.ConnectionState.END, # pylint:disable=c-extension-no-member ): self._conn = None diff --git a/sdk/eventhub/azure-eventhubs/azure/eventhub/aio/_consumer_producer_mixin_async.py b/sdk/eventhub/azure-eventhubs/azure/eventhub/aio/_consumer_producer_mixin_async.py index 49a4e071f921..0ca5889aa972 100644 --- a/sdk/eventhub/azure-eventhubs/azure/eventhub/aio/_consumer_producer_mixin_async.py +++ b/sdk/eventhub/azure-eventhubs/azure/eventhub/aio/_consumer_producer_mixin_async.py @@ -24,8 +24,10 @@ async def wrapped_func(self, *args, **kwargs): last_exception = None while True: try: - return await to_be_wrapped_func(self, timeout_time=timeout_time, last_exception=last_exception, **kwargs) - except Exception as exception: + return await to_be_wrapped_func( + self, timeout_time=timeout_time, last_exception=last_exception, **kwargs + ) + except Exception as exception: # pylint:disable=broad-except last_exception = await 
self._handle_exception(exception, retry_count, max_retries, timeout_time) retry_count += 1 return wrapped_func @@ -90,7 +92,7 @@ async def _close_handler(self): async def _close_connection(self): await self._close_handler() - await self.client._conn_manager.reset_connection_if_broken() + await self.client._conn_manager.reset_connection_if_broken() # pylint:disable=protected-access async def _handle_exception(self, exception, retry_count, max_retries, timeout_time): if not self.running and isinstance(exception, compat.TimeoutException): @@ -133,4 +135,4 @@ async def close(self, exception=None): else: self.error = EventHubError("This receive handler is now closed.") if self._handler: - await self._handler.close_async() \ No newline at end of file + await self._handler.close_async() diff --git a/sdk/eventhub/azure-eventhubs/azure/eventhub/aio/client_async.py b/sdk/eventhub/azure-eventhubs/azure/eventhub/aio/client_async.py index fa03a3d5324d..1c02466a9add 100644 --- a/sdk/eventhub/azure-eventhubs/azure/eventhub/aio/client_async.py +++ b/sdk/eventhub/azure-eventhubs/azure/eventhub/aio/client_async.py @@ -6,7 +6,7 @@ import datetime import functools import asyncio -from typing import Any, List, Dict +from typing import Any, List, Dict, Union, TYPE_CHECKING from uamqp import authentication, constants # type: ignore from uamqp import ( @@ -14,7 +14,8 @@ AMQPClientAsync, ) # type: ignore -from azure.eventhub.common import parse_sas_token, EventPosition, EventHubSharedKeyCredential, EventHubSASTokenCredential +from azure.eventhub.common import parse_sas_token, EventPosition, \ + EventHubSharedKeyCredential, EventHubSASTokenCredential from ..client_abstract import EventHubClientAbstract from .producer_async import EventHubProducer @@ -22,6 +23,8 @@ from ._connection_manager_async import get_connection_manager from .error_async import _handle_exception +if TYPE_CHECKING: + from azure.core.credentials import TokenCredential # type: ignore log = logging.getLogger(__name__) @@ 
-42,7 +45,9 @@ class EventHubClient(EventHubClientAbstract): """ def __init__(self, host, event_hub_path, credential, **kwargs): - super(EventHubClient, self).__init__(host, event_hub_path, credential, **kwargs) + # type:(str, str, Union[EventHubSharedKeyCredential, EventHubSASTokenCredential, TokenCredential], ...) -> None + + super(EventHubClient, self).__init__(host=host, event_hub_path=event_hub_path, credential=credential, **kwargs) self._conn_manager = get_connection_manager(**kwargs) async def __aenter__(self): @@ -65,7 +70,7 @@ def _create_auth(self, username=None, password=None): transport_type = self.config.transport_type auth_timeout = self.config.auth_timeout - if isinstance(self.credential, EventHubSharedKeyCredential): + if isinstance(self.credential, EventHubSharedKeyCredential): # pylint:disable=no-else-return username = username or self._auth_config['username'] password = password or self._auth_config['password'] if "@sas.root" in username: @@ -116,7 +121,7 @@ async def _management_request(self, mgmt_msg, op_type): status_code_field=b'status-code', description_fields=b'status-description') return response - except Exception as exception: + except Exception as exception: # pylint:disable=broad-except await self._handle_exception(exception, retry_count, max_retries) retry_count += 1 finally: @@ -190,7 +195,12 @@ async def get_partition_properties(self, partition): output['is_empty'] = partition_info[b'is_partition_empty'] return output - def create_consumer(self, consumer_group: str, partition_id: str, event_position: EventPosition, **kwargs): + def create_consumer( + self, + consumer_group: str, + partition_id: str, + event_position: EventPosition, **kwargs + ) -> EventHubConsumer: """ Create an async consumer to the client for a particular consumer group and partition. 
@@ -234,8 +244,13 @@ def create_consumer(self, consumer_group: str, partition_id: str, event_position prefetch=prefetch, loop=loop) return handler - def create_producer(self, *, partition_id=None, operation=None, send_timeout=None, loop=None): - # type: (str, str, float, asyncio.AbstractEventLoop) -> EventHubProducer + def create_producer( + self, *, + partition_id: str = None, + operation: str = None, + send_timeout: float = None, + loop: asyncio.AbstractEventLoop = None + ) -> EventHubProducer: """ Create an async producer to send EventData object to an EventHub. diff --git a/sdk/eventhub/azure-eventhubs/azure/eventhub/aio/consumer_async.py b/sdk/eventhub/azure-eventhubs/azure/eventhub/aio/consumer_async.py index 2814b0c54b4a..3747d1af4d9d 100644 --- a/sdk/eventhub/azure-eventhubs/azure/eventhub/aio/consumer_async.py +++ b/sdk/eventhub/azure-eventhubs/azure/eventhub/aio/consumer_async.py @@ -18,7 +18,7 @@ log = logging.getLogger(__name__) -class EventHubConsumer(ConsumerProducerMixin): +class EventHubConsumer(ConsumerProducerMixin): # pylint:disable=too-many-instance-attributes """ A consumer responsible for reading EventData from a specific Event Hub partition and as a member of a specific consumer group. 
@@ -100,21 +100,21 @@ async def __anext__(self): if not self.messages_iter: self.messages_iter = self._handler.receive_messages_iter_async() message = await self.messages_iter.__anext__() - event_data = EventData._from_message(message) + event_data = EventData._from_message(message) # pylint:disable=protected-access self.offset = EventPosition(event_data.offset, inclusive=False) retry_count = 0 return event_data - except Exception as exception: - await self._handle_exception(exception, retry_count, max_retries) + except Exception as exception: # pylint:disable=broad-except + await self._handle_exception(exception, retry_count, max_retries, timeout_time=None) retry_count += 1 def _create_handler(self): alt_creds = { - "username": self.client._auth_config.get("iot_username"), - "password": self.client._auth_config.get("iot_password")} + "username": self.client._auth_config.get("iot_username"), # pylint:disable=protected-access + "password": self.client._auth_config.get("iot_password")} # pylint:disable=protected-access source = Source(self.source) if self.offset is not None: - source.set_filter(self.offset._selector()) + source.set_filter(self.offset._selector()) # pylint:disable=protected-access self._handler = ReceiveClientAsync( source, auth=self.client.get_auth(**alt_creds), @@ -125,8 +125,8 @@ def _create_handler(self): error_policy=self.retry_policy, keep_alive_interval=self.keep_alive, client_name=self.name, - properties=self.client._create_properties( - self.client.config.user_agent), # pylint: disable=protected-access + properties=self.client._create_properties( # pylint:disable=protected-access + self.client.config.user_agent), loop=self.loop) self.messages_iter = None @@ -164,7 +164,7 @@ async def _receive(self, timeout_time=None, max_batch_size=None, **kwargs): max_batch_size=max_batch_size, timeout=remaining_time_ms) for message in message_batch: - event_data = EventData._from_message(message) + event_data = EventData._from_message(message) # 
pylint:disable=protected-access self.offset = EventPosition(event_data.offset) data_batch.append(event_data) return data_batch diff --git a/sdk/eventhub/azure-eventhubs/azure/eventhub/aio/error_async.py b/sdk/eventhub/azure-eventhubs/azure/eventhub/aio/error_async.py index 51d5ac8ad0f1..5d0cff3ebc1d 100644 --- a/sdk/eventhub/azure-eventhubs/azure/eventhub/aio/error_async.py +++ b/sdk/eventhub/azure-eventhubs/azure/eventhub/aio/error_async.py @@ -1,3 +1,7 @@ +# -------------------------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# -------------------------------------------------------------------------------------------- import asyncio import time import logging @@ -32,20 +36,20 @@ def _create_eventhub_exception(exception): return error -async def _handle_exception(exception, retry_count, max_retries, closable, timeout_time=None): +async def _handle_exception(exception, retry_count, max_retries, closable, timeout_time=None): # pylint:disable=too-many-branches, too-many-statements if isinstance(exception, asyncio.CancelledError): - raise + raise exception try: name = closable.name except AttributeError: name = closable.container_id - if isinstance(exception, KeyboardInterrupt): + if isinstance(exception, KeyboardInterrupt): # pylint:disable=no-else-raise log.info("%r stops due to keyboard interrupt", name) closable.close() - raise + raise exception elif isinstance(exception, EventHubError): closable.close() - raise + raise exception elif isinstance(exception, ( errors.MessageAccepted, errors.MessageAlreadySettled, @@ -68,29 +72,29 @@ async def _handle_exception(exception, retry_count, max_retries, closable, timeo else: if isinstance(exception, errors.AuthenticationException): if hasattr(closable, "_close_connection"): - await closable._close_connection() + await 
closable._close_connection() # pylint:disable=protected-access elif isinstance(exception, errors.LinkRedirect): log.info("%r link redirect received. Redirecting...", name) redirect = exception if hasattr(closable, "_redirect"): - await closable._redirect(redirect) + await closable._redirect(redirect) # pylint:disable=protected-access elif isinstance(exception, errors.LinkDetach): if hasattr(closable, "_close_handler"): - await closable._close_handler() + await closable._close_handler() # pylint:disable=protected-access elif isinstance(exception, errors.ConnectionClose): if hasattr(closable, "_close_connection"): - await closable._close_connection() + await closable._close_connection() # pylint:disable=protected-access elif isinstance(exception, errors.MessageHandlerError): if hasattr(closable, "_close_handler"): - await closable._close_handler() + await closable._close_handler() # pylint:disable=protected-access elif isinstance(exception, errors.AMQPConnectionError): if hasattr(closable, "_close_connection"): - await closable._close_connection() + await closable._close_connection() # pylint:disable=protected-access elif isinstance(exception, compat.TimeoutException): pass # Timeout doesn't need to recreate link or connection to retry else: if hasattr(closable, "_close_connection"): - await closable._close_connection() + await closable._close_connection() # pylint:disable=protected-access # start processing retry delay try: backoff_factor = closable.client.config.backoff_factor @@ -99,7 +103,7 @@ async def _handle_exception(exception, retry_count, max_retries, closable, timeo backoff_factor = closable.config.backoff_factor backoff_max = closable.config.backoff_max backoff = backoff_factor * 2 ** retry_count - if backoff <= backoff_max and (timeout_time is None or time.time() + backoff <= timeout_time): + if backoff <= backoff_max and (timeout_time is None or time.time() + backoff <= timeout_time): # pylint:disable=no-else-return await asyncio.sleep(backoff) 
log.info("%r has an exception (%r). Retrying...", format(name), exception) return _create_eventhub_exception(exception) @@ -107,4 +111,4 @@ async def _handle_exception(exception, retry_count, max_retries, closable, timeo error = _create_eventhub_exception(exception) log.info("%r operation has timed out. Last exception before timeout is (%r)", name, error) raise error - # end of processing retry delay \ No newline at end of file + # end of processing retry delay diff --git a/sdk/eventhub/azure-eventhubs/azure/eventhub/aio/producer_async.py b/sdk/eventhub/azure-eventhubs/azure/eventhub/aio/producer_async.py index 96be7e90ed15..1f4fde946cf2 100644 --- a/sdk/eventhub/azure-eventhubs/azure/eventhub/aio/producer_async.py +++ b/sdk/eventhub/azure-eventhubs/azure/eventhub/aio/producer_async.py @@ -5,7 +5,7 @@ import uuid import asyncio import logging -from typing import Iterable, Union +from typing import Iterable, Union, Any import time from uamqp import types, constants, errors # type: ignore @@ -185,7 +185,7 @@ async def create_batch(self, max_size=None, partition_key=None): return EventDataBatch(max_size=(max_size or self._max_message_size_on_link), partition_key=partition_key) async def send(self, event_data, *, partition_key=None, timeout=None): - # type:(Union[EventData, EventDataBatch, Iterable[EventData]], Union[str, bytes], float) -> None + # type:(Union[EventData, EventDataBatch, Iterable[EventData]],Any, Union[str, bytes], float) -> None """ Sends an event data and blocks until acknowledgement is received or operation times out. 
diff --git a/sdk/eventhub/azure-eventhubs/azure/eventhub/client.py b/sdk/eventhub/azure-eventhubs/azure/eventhub/client.py index 872ba98f48e0..deb07ce8dba7 100644 --- a/sdk/eventhub/azure-eventhubs/azure/eventhub/client.py +++ b/sdk/eventhub/azure-eventhubs/azure/eventhub/client.py @@ -7,12 +7,7 @@ import logging import datetime import functools -try: - from urlparse import urlparse # type: ignore - from urllib import unquote_plus, urlencode, quote_plus # type: ignore -except ImportError: - from urllib.parse import urlparse, unquote_plus, urlencode, quote_plus -from typing import Any, List, Dict +from typing import Any, List, Dict, Union, TYPE_CHECKING import uamqp # type: ignore from uamqp import Message # type: ignore @@ -27,6 +22,8 @@ from ._connection_manager import get_connection_manager from .error import _handle_exception +if TYPE_CHECKING: + from azure.core.credentials import TokenCredential # type: ignore log = logging.getLogger(__name__) @@ -47,7 +44,8 @@ class EventHubClient(EventHubClientAbstract): """ def __init__(self, host, event_hub_path, credential, **kwargs): - super(EventHubClient, self).__init__(host, event_hub_path, credential, **kwargs) + # type:(str, str, Union[EventHubSharedKeyCredential, EventHubSASTokenCredential, TokenCredential], ...) 
-> None + super(EventHubClient, self).__init__(host=host, event_hub_path=event_hub_path, credential=credential, **kwargs) self._conn_manager = get_connection_manager(**kwargs) def __enter__(self): @@ -71,7 +69,7 @@ def _create_auth(self, username=None, password=None): auth_timeout = self.config.auth_timeout # TODO: the following code can be refactored to create auth from classes directly instead of using if-else - if isinstance(self.credential, EventHubSharedKeyCredential): + if isinstance(self.credential, EventHubSharedKeyCredential): # pylint:disable=no-else-return username = username or self._auth_config['username'] password = password or self._auth_config['password'] if "@sas.root" in username: @@ -114,7 +112,7 @@ def _management_request(self, mgmt_msg, op_type): mgmt_auth = self._create_auth() mgmt_client = uamqp.AMQPClient(self.mgmt_target) try: - conn = self._conn_manager.get_connection(self.host, mgmt_auth) + conn = self._conn_manager.get_connection(self.host, mgmt_auth) #pylint:disable=assignment-from-none mgmt_client.open(connection=conn) response = mgmt_client.mgmt_request( mgmt_msg, @@ -123,7 +121,7 @@ def _management_request(self, mgmt_msg, op_type): status_code_field=b'status-code', description_fields=b'status-description') return response - except Exception as exception: + except Exception as exception: # pylint: disable=broad-except self._handle_exception(exception, retry_count, max_retries) retry_count += 1 finally: @@ -277,4 +275,5 @@ def create_producer(self, partition_id=None, operation=None, send_timeout=None): return handler def close(self): + # type:() -> None self._conn_manager.close_connection() diff --git a/sdk/eventhub/azure-eventhubs/azure/eventhub/client_abstract.py b/sdk/eventhub/azure-eventhubs/azure/eventhub/client_abstract.py index d861b385336c..068bd26810a8 100644 --- a/sdk/eventhub/azure-eventhubs/azure/eventhub/client_abstract.py +++ b/sdk/eventhub/azure-eventhubs/azure/eventhub/client_abstract.py @@ -10,24 +10,19 @@ import time 
import functools from abc import abstractmethod -from typing import Dict +from typing import Dict, Union, TYPE_CHECKING +from azure.eventhub import __version__ +from azure.eventhub.configuration import _Configuration +from .common import EventHubSharedKeyCredential, EventHubSASTokenCredential, _Address + try: from urlparse import urlparse # type: ignore - from urllib import unquote_plus, urlencode, quote_plus # type: ignore + from urllib import urlencode, quote_plus # type: ignore except ImportError: - from urllib.parse import urlparse, unquote_plus, urlencode, quote_plus + from urllib.parse import urlparse, urlencode, quote_plus -try: - from typing import TYPE_CHECKING -except ImportError: - TYPE_CHECKING = False if TYPE_CHECKING: from azure.core.credentials import TokenCredential # type: ignore - from typing import Union, Any - -from azure.eventhub import __version__ -from azure.eventhub.configuration import _Configuration -from .common import EventHubSharedKeyCredential, EventHubSASTokenCredential, _Address log = logging.getLogger(__name__) MAX_USER_AGENT_LENGTH = 512 @@ -88,14 +83,14 @@ def _build_uri(address, entity): return address -class EventHubClientAbstract(object): +class EventHubClientAbstract(object): # pylint:disable=too-many-instance-attributes """ The EventHubClientAbstract class defines a high level interface for sending events to and receiving events from the Azure Event Hubs service. """ def __init__(self, host, event_hub_path, credential, **kwargs): - # type:(str, str, Union[EventHubSharedKeyCredential, EventHubSASTokenCredential, TokenCredential], Any) -> None + # type:(str, str, Union[EventHubSharedKeyCredential, EventHubSASTokenCredential, TokenCredential], ...) -> None """ Constructs a new EventHubClient. 
@@ -269,7 +264,7 @@ def from_connection_string(cls, conn_str, **kwargs): """ event_hub_path = kwargs.pop("event_hub_path", None) is_iot_conn_str = conn_str.lstrip().lower().startswith("hostname") - if not is_iot_conn_str: + if not is_iot_conn_str: # pylint:disable=no-else-return address, policy, key, entity = _parse_conn_str(conn_str) entity = event_hub_path or entity left_slash_pos = address.find("//") diff --git a/sdk/eventhub/azure-eventhubs/azure/eventhub/common.py b/sdk/eventhub/azure-eventhubs/azure/eventhub/common.py index cb8e1a7103b1..56ea25863be3 100644 --- a/sdk/eventhub/azure-eventhubs/azure/eventhub/common.py +++ b/sdk/eventhub/azure-eventhubs/azure/eventhub/common.py @@ -7,12 +7,12 @@ import datetime import calendar import json -import six import logging +import six -from azure.eventhub.error import EventDataError from uamqp import BatchMessage, Message, types, constants # type: ignore from uamqp.message import MessageHeader, MessageProperties # type: ignore +from azure.eventhub.error import EventDataError log = logging.getLogger(__name__) @@ -119,9 +119,9 @@ def _set_partition_key(self, value): def _from_message(message): event_data = EventData(body='') event_data.message = message - event_data._msg_properties = message.properties - event_data._annotations = message.annotations - event_data._app_properties = message.application_properties + event_data._msg_properties = message.properties # pylint:disable=protected-access + event_data._annotations = message.annotations # pylint:disable=protected-access + event_data._app_properties = message.application_properties # pylint:disable=protected-access return event_data @property @@ -281,7 +281,7 @@ def size(self): @staticmethod def _from_batch(batch_data, partition_key=None): batch_data_instance = EventDataBatch(partition_key=partition_key) - batch_data_instance.message._body_gen = batch_data + batch_data_instance.message._body_gen = batch_data # pylint:disable=protected-access return batch_data_instance 
def _set_partition_key(self, value): @@ -308,10 +308,10 @@ def try_add(self, event_data): raise TypeError('event_data should be type of EventData') if self._partition_key: - if event_data.partition_key and not (event_data.partition_key == self._partition_key): + if event_data.partition_key and not event_data.partition_key == self._partition_key: raise EventDataError('The partition_key of event_data does not match the one of the EventDataBatch') if not event_data.partition_key: - event_data._set_partition_key(self._partition_key) + event_data._set_partition_key(self._partition_key) # pylint:disable=protected-access event_data_size = event_data.message.get_message_encoded_size() @@ -368,7 +368,7 @@ def _selector(self): :rtype: bytes """ operator = ">=" if self.inclusive else ">" - if isinstance(self.value, datetime.datetime): + if isinstance(self.value, datetime.datetime): # pylint:disable=no-else-return timestamp = (calendar.timegm(self.value.utctimetuple()) * 1000) + (self.value.microsecond/1000) return ("amqp.annotation.x-opt-enqueued-time {} '{}'".format(operator, int(timestamp))).encode('utf-8') elif isinstance(self.value, six.integer_types): @@ -391,7 +391,7 @@ def __init__(self, token): self.token = token def get_sas_token(self): - if callable(self.token): + if callable(self.token): # pylint:disable=no-else-return return self.token() else: return self.token diff --git a/sdk/eventhub/azure-eventhubs/azure/eventhub/configuration.py b/sdk/eventhub/azure-eventhubs/azure/eventhub/configuration.py index 5be9df85c732..e7ea5b43df46 100644 --- a/sdk/eventhub/azure-eventhubs/azure/eventhub/configuration.py +++ b/sdk/eventhub/azure-eventhubs/azure/eventhub/configuration.py @@ -5,7 +5,7 @@ from uamqp.constants import TransportType # type: ignore -class _Configuration(object): +class _Configuration(object): # pylint:disable=too-many-instance-attributes def __init__(self, **kwargs): self.user_agent = kwargs.get("user_agent") self.retry_total = kwargs.get('retry_total', 3) 
diff --git a/sdk/eventhub/azure-eventhubs/azure/eventhub/consumer.py b/sdk/eventhub/azure-eventhubs/azure/eventhub/consumer.py index 883840d7d720..44be38386b73 100644 --- a/sdk/eventhub/azure-eventhubs/azure/eventhub/consumer.py +++ b/sdk/eventhub/azure-eventhubs/azure/eventhub/consumer.py @@ -20,7 +20,7 @@ log = logging.getLogger(__name__) -class EventHubConsumer(ConsumerProducerMixin): +class EventHubConsumer(ConsumerProducerMixin): # pylint:disable=too-many-instance-attributes """ A consumer responsible for reading EventData from a specific Event Hub partition and as a member of a specific consumer group. @@ -96,21 +96,21 @@ def __next__(self): if not self.messages_iter: self.messages_iter = self._handler.receive_messages_iter() message = next(self.messages_iter) - event_data = EventData._from_message(message) + event_data = EventData._from_message(message) # pylint:disable=protected-access self.offset = EventPosition(event_data.offset, inclusive=False) retry_count = 0 return event_data - except Exception as exception: - self._handle_exception(exception, retry_count, max_retries) + except Exception as exception: # pylint:disable=broad-except + self._handle_exception(exception, retry_count, max_retries, timeout_time=None) retry_count += 1 def _create_handler(self): alt_creds = { - "username": self.client._auth_config.get("iot_username"), - "password": self.client._auth_config.get("iot_password")} + "username": self.client._auth_config.get("iot_username"), # pylint:disable=protected-access + "password": self.client._auth_config.get("iot_password")} # pylint:disable=protected-access source = Source(self.source) if self.offset is not None: - source.set_filter(self.offset._selector()) + source.set_filter(self.offset._selector()) # pylint:disable=protected-access self._handler = ReceiveClient( source, auth=self.client.get_auth(**alt_creds), @@ -121,8 +121,8 @@ def _create_handler(self): error_policy=self.retry_policy, keep_alive_interval=self.keep_alive, 
client_name=self.name, - properties=self.client._create_properties( - self.client.config.user_agent)) # pylint: disable=protected-access + properties=self.client._create_properties( # pylint:disable=protected-access + self.client.config.user_agent)) self.messages_iter = None def _redirect(self, redirect): @@ -158,7 +158,7 @@ def _receive(self, timeout_time=None, max_batch_size=None, **kwargs): max_batch_size=max_batch_size - (len(data_batch) if data_batch else 0), timeout=remaining_time_ms) for message in message_batch: - event_data = EventData._from_message(message) + event_data = EventData._from_message(message) # pylint:disable=protected-access self.offset = EventPosition(event_data.offset) data_batch.append(event_data) return data_batch diff --git a/sdk/eventhub/azure-eventhubs/azure/eventhub/error.py b/sdk/eventhub/azure-eventhubs/azure/eventhub/error.py index f48be4e557ea..72b11f5478ad 100644 --- a/sdk/eventhub/azure-eventhubs/azure/eventhub/error.py +++ b/sdk/eventhub/azure-eventhubs/azure/eventhub/error.py @@ -2,11 +2,11 @@ # Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. See License.txt in the project root for license information. # -------------------------------------------------------------------------------------------- -import six import time import logging +import six -from uamqp import constants, errors, compat # type: ignore +from uamqp import errors, compat # type: ignore _NO_RETRY_ERRORS = ( @@ -102,14 +102,12 @@ class ConnectionLostError(EventHubError): """Connection to event hub is lost. SDK will retry. So this shouldn't happen. 
""" - pass class ConnectError(EventHubError): """Fail to connect to event hubs """ - pass class AuthenticationError(ConnectError): @@ -117,28 +115,24 @@ class AuthenticationError(ConnectError): """ - pass class EventDataError(EventHubError): """Problematic event data so the send will fail at client side """ - pass class EventDataSendError(EventHubError): """Service returns error while an event data is being sent """ - pass class OperationTimeoutError(EventHubError): """Operation times out """ - pass def _create_eventhub_exception(exception): @@ -163,18 +157,18 @@ def _create_eventhub_exception(exception): return error -def _handle_exception(exception, retry_count, max_retries, closable, timeout_time=None): +def _handle_exception(exception, retry_count, max_retries, closable, timeout_time=None): # pylint:disable=too-many-branches, too-many-statements try: name = closable.name except AttributeError: name = closable.container_id - if isinstance(exception, KeyboardInterrupt): + if isinstance(exception, KeyboardInterrupt): # pylint:disable=no-else-raise log.info("%r stops due to keyboard interrupt", name) closable.close() - raise + raise exception elif isinstance(exception, EventHubError): closable.close() - raise + raise exception elif isinstance(exception, ( errors.MessageAccepted, errors.MessageAlreadySettled, @@ -197,29 +191,29 @@ def _handle_exception(exception, retry_count, max_retries, closable, timeout_tim else: if isinstance(exception, errors.AuthenticationException): if hasattr(closable, "_close_connection"): - closable._close_connection() + closable._close_connection() # pylint:disable=protected-access elif isinstance(exception, errors.LinkRedirect): log.info("%r link redirect received. 
Redirecting...", name) redirect = exception if hasattr(closable, "_redirect"): - closable._redirect(redirect) + closable._redirect(redirect) # pylint:disable=protected-access elif isinstance(exception, errors.LinkDetach): if hasattr(closable, "_close_handler"): - closable._close_handler() + closable._close_handler() # pylint:disable=protected-access elif isinstance(exception, errors.ConnectionClose): if hasattr(closable, "_close_connection"): - closable._close_connection() + closable._close_connection() # pylint:disable=protected-access elif isinstance(exception, errors.MessageHandlerError): if hasattr(closable, "_close_handler"): - closable._close_handler() + closable._close_handler() # pylint:disable=protected-access elif isinstance(exception, errors.AMQPConnectionError): if hasattr(closable, "_close_connection"): - closable._close_connection() + closable._close_connection() # pylint:disable=protected-access elif isinstance(exception, compat.TimeoutException): pass # Timeout doesn't need to recreate link or connection to retry else: if hasattr(closable, "_close_connection"): - closable._close_connection() + closable._close_connection() # pylint:disable=protected-access # start processing retry delay try: backoff_factor = closable.client.config.backoff_factor @@ -228,7 +222,7 @@ def _handle_exception(exception, retry_count, max_retries, closable, timeout_tim backoff_factor = closable.config.backoff_factor backoff_max = closable.config.backoff_max backoff = backoff_factor * 2 ** retry_count - if backoff <= backoff_max and (timeout_time is None or time.time() + backoff <= timeout_time): + if backoff <= backoff_max and (timeout_time is None or time.time() + backoff <= timeout_time): #pylint:disable=no-else-return time.sleep(backoff) log.info("%r has an exception (%r). 
Retrying...", format(name), exception) return _create_eventhub_exception(exception) diff --git a/sdk/eventhub/azure-eventhubs/azure/eventhub/producer.py b/sdk/eventhub/azure-eventhubs/azure/eventhub/producer.py index 96c647909043..1a1217957686 100644 --- a/sdk/eventhub/azure-eventhubs/azure/eventhub/producer.py +++ b/sdk/eventhub/azure-eventhubs/azure/eventhub/producer.py @@ -28,11 +28,11 @@ def _error(outcome, condition): def _set_partition_key(event_datas, partition_key): ed_iter = iter(event_datas) for ed in ed_iter: - ed._set_partition_key(partition_key) + ed._set_partition_key(partition_key) # pylint:disable=protected-access yield ed -class EventHubProducer(ConsumerProducerMixin): +class EventHubProducer(ConsumerProducerMixin): # pylint:disable=too-many-instance-attributes """ A producer responsible for transmitting EventData to a specific Event Hub, grouped together in batches. Depending on the options specified at creation, the producer may @@ -111,9 +111,9 @@ def _open(self, timeout_time=None, **kwargs): context will be used to create a new handler before opening it. """ - # pylint: disable=protected-access + if not self.running and self.redirected: - self.client._process_redirect_uri(self.redirected) + self.client._process_redirect_uri(self.redirected) # pylint: disable=protected-access self.target = self.redirected.address super(EventHubProducer, self)._open(timeout_time) @@ -140,7 +140,6 @@ def _send_event_data(self, timeout_time=None, last_exception=None): if self._outcome == constants.MessageSendResult.Timeout: self._condition = OperationTimeoutError("send operation timed out") _error(self._outcome, self._condition) - return @_retry_decorator def _send_event_data_with_retry(self, timeout_time=None, last_exception=None): @@ -229,7 +228,7 @@ def send(self, event_data, partition_key=None, timeout=None): wrapper_event_data = event_data else: if isinstance(event_data, EventDataBatch): # The partition_key in the param will be omitted. 
- if partition_key and not (partition_key == event_data._partition_key): # pylint: disable=protected-access + if partition_key and not partition_key == event_data._partition_key: # pylint: disable=protected-access raise EventDataError('The partition_key does not match the one of the EventDataBatch') wrapper_event_data = event_data # type:ignore else: @@ -240,7 +239,7 @@ def send(self, event_data, partition_key=None, timeout=None): self.unsent_events = [wrapper_event_data.message] self._send_event_data_with_retry(timeout=timeout) - def close(self, exception=None): + def close(self, exception=None): # pylint:disable=useless-super-delegation # type:(Exception) -> None """ Close down the handler. If the handler has already closed, From 2bdbffe0b97f8879d4a803b48ab9c3c5230b4886 Mon Sep 17 00:00:00 2001 From: yijxie Date: Thu, 29 Aug 2019 08:29:02 -0700 Subject: [PATCH 15/51] Fix pylint --- .../azure/eventhub/aio/client_async.py | 1 + .../azure/eventhub/aio/producer_async.py | 12 ++++++------ 2 files changed, 7 insertions(+), 6 deletions(-) diff --git a/sdk/eventhub/azure-eventhubs/azure/eventhub/aio/client_async.py b/sdk/eventhub/azure-eventhubs/azure/eventhub/aio/client_async.py index 1c02466a9add..d9b1be918bf4 100644 --- a/sdk/eventhub/azure-eventhubs/azure/eventhub/aio/client_async.py +++ b/sdk/eventhub/azure-eventhubs/azure/eventhub/aio/client_async.py @@ -287,4 +287,5 @@ def create_producer( return handler async def close(self): + # type: () -> None await self._conn_manager.close_connection() diff --git a/sdk/eventhub/azure-eventhubs/azure/eventhub/aio/producer_async.py b/sdk/eventhub/azure-eventhubs/azure/eventhub/aio/producer_async.py index 1f4fde946cf2..1e819aa12d74 100644 --- a/sdk/eventhub/azure-eventhubs/azure/eventhub/aio/producer_async.py +++ b/sdk/eventhub/azure-eventhubs/azure/eventhub/aio/producer_async.py @@ -20,7 +20,7 @@ log = logging.getLogger(__name__) -class EventHubProducer(ConsumerProducerMixin): +class EventHubProducer(ConsumerProducerMixin): # 
pylint: disable=too-many-instance-attributes """ A producer responsible for transmitting EventData to a specific Event Hub, grouped together in batches. Depending on the options specified at creation, the producer may @@ -94,8 +94,8 @@ def _create_handler(self): keep_alive_interval=self.keep_alive, client_name=self.name, link_properties=self._link_properties, - properties=self.client._create_properties( - self.client.config.user_agent), # pylint: disable=protected-access + properties=self.client._create_properties( # pylint: disable=protected-access + self.client.config.user_agent), loop=self.loop) async def _open(self, timeout_time=None, **kwargs): @@ -106,7 +106,7 @@ async def _open(self, timeout_time=None, **kwargs): """ if not self.running and self.redirected: - self.client._process_redirect_uri(self.redirected) + self.client._process_redirect_uri(self.redirected) # pylint: disable=protected-access self.target = self.redirected.address await super(EventHubProducer, self)._open(timeout_time) @@ -222,7 +222,7 @@ async def send(self, event_data, *, partition_key=None, timeout=None): wrapper_event_data = event_data else: if isinstance(event_data, EventDataBatch): - if partition_key and not (partition_key == event_data._partition_key): # pylint: disable=protected-access + if partition_key and partition_key != event_data._partition_key: # pylint: disable=protected-access raise EventDataError('The partition_key does not match the one of the EventDataBatch') wrapper_event_data = event_data #type: ignore else: @@ -233,7 +233,7 @@ async def send(self, event_data, *, partition_key=None, timeout=None): self.unsent_events = [wrapper_event_data.message] await self._send_event_data_with_retry(timeout=timeout) - async def close(self, exception=None): + async def close(self, exception=None): # type: (Exception) -> None """ Close down the handler. 
If the handler has already closed, From e8ea699c1ca4c18256035df8215396911bcc027d Mon Sep 17 00:00:00 2001 From: yijxie Date: Thu, 29 Aug 2019 09:05:22 -0700 Subject: [PATCH 16/51] Use properties EventData.partition_key --- sdk/eventhub/azure-eventhubs/tests/test_send.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/sdk/eventhub/azure-eventhubs/tests/test_send.py b/sdk/eventhub/azure-eventhubs/tests/test_send.py index 8499ff93b36d..249b04215c7a 100644 --- a/sdk/eventhub/azure-eventhubs/tests/test_send.py +++ b/sdk/eventhub/azure-eventhubs/tests/test_send.py @@ -33,7 +33,7 @@ def test_send_with_partition_key(connstr_receivers): received = partition.receive(timeout=5) for message in received: try: - existing = found_partition_keys[message._partition_key] + existing = found_partition_keys[message.partition_key] assert existing == index except KeyError: found_partition_keys[message._partition_key] = index From cb08478c6d1da26f20ca9e1496da7f0b8abf636f Mon Sep 17 00:00:00 2001 From: yijxie Date: Thu, 29 Aug 2019 12:16:28 -0700 Subject: [PATCH 17/51] Use properties EventData.partition_key --- sdk/eventhub/azure-eventhubs/tests/test_negative.py | 2 +- sdk/eventhub/azure-eventhubs/tests/test_send.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/sdk/eventhub/azure-eventhubs/tests/test_negative.py b/sdk/eventhub/azure-eventhubs/tests/test_negative.py index a1fee7605818..e9d3a9e17f8e 100644 --- a/sdk/eventhub/azure-eventhubs/tests/test_negative.py +++ b/sdk/eventhub/azure-eventhubs/tests/test_negative.py @@ -85,7 +85,7 @@ def test_send_partition_key_with_partition_sync(connection_str): sender = client.create_producer(partition_id="1") try: data = EventData(b"Data") - data.partition_key = b"PKey" + data._set_partition_key(b"PKey") with pytest.raises(ValueError): sender.send(data) finally: diff --git a/sdk/eventhub/azure-eventhubs/tests/test_send.py b/sdk/eventhub/azure-eventhubs/tests/test_send.py index 249b04215c7a..1c34c672d99f 100644 
--- a/sdk/eventhub/azure-eventhubs/tests/test_send.py +++ b/sdk/eventhub/azure-eventhubs/tests/test_send.py @@ -36,7 +36,7 @@ def test_send_with_partition_key(connstr_receivers): existing = found_partition_keys[message.partition_key] assert existing == index except KeyError: - found_partition_keys[message._partition_key] = index + found_partition_keys[message.partition_key] = index @pytest.mark.liveTest From b3dcd076207c629f9721ce9d19336eda62c01b6b Mon Sep 17 00:00:00 2001 From: yijxie Date: Thu, 29 Aug 2019 12:37:51 -0700 Subject: [PATCH 18/51] Temporarily disable pylint errors that need refactoring --- .../azure/eventhub/_consumer_producer_mixin.py | 4 ++-- .../azure/eventhub/aio/_consumer_producer_mixin_async.py | 6 +++--- .../azure-eventhubs/azure/eventhub/aio/producer_async.py | 4 ++-- sdk/eventhub/azure-eventhubs/azure/eventhub/producer.py | 4 ++-- 4 files changed, 9 insertions(+), 9 deletions(-) diff --git a/sdk/eventhub/azure-eventhubs/azure/eventhub/_consumer_producer_mixin.py b/sdk/eventhub/azure-eventhubs/azure/eventhub/_consumer_producer_mixin.py index 837aaab26d78..fd12b439d3d4 100644 --- a/sdk/eventhub/azure-eventhubs/azure/eventhub/_consumer_producer_mixin.py +++ b/sdk/eventhub/azure-eventhubs/azure/eventhub/_consumer_producer_mixin.py @@ -14,7 +14,7 @@ def _retry_decorator(to_be_wrapped_func): - def wrapped_func(self, *args, **kwargs): + def wrapped_func(self, *args, **kwargs): # pylint:disable=unused-argument # TODO: to refactor timeout = kwargs.pop("timeout", 100000) if not timeout: timeout = 100000 # timeout equals to 0 means no timeout, set the value to be a large number. @@ -55,7 +55,7 @@ def _redirect(self, redirect): self.running = False self._close_connection() - def _open(self, timeout_time=None): + def _open(self, timeout_time=None): # pylint:disable=unused-argument # TODO: to refactor """ Open the EventHubConsumer using the supplied connection. 
If the handler has previously been redirected, the redirect diff --git a/sdk/eventhub/azure-eventhubs/azure/eventhub/aio/_consumer_producer_mixin_async.py b/sdk/eventhub/azure-eventhubs/azure/eventhub/aio/_consumer_producer_mixin_async.py index 0ca5889aa972..53624c36f649 100644 --- a/sdk/eventhub/azure-eventhubs/azure/eventhub/aio/_consumer_producer_mixin_async.py +++ b/sdk/eventhub/azure-eventhubs/azure/eventhub/aio/_consumer_producer_mixin_async.py @@ -14,7 +14,7 @@ def _retry_decorator(to_be_wrapped_func): - async def wrapped_func(self, *args, **kwargs): + async def wrapped_func(self, *args, **kwargs): # pylint:disable=unused-argument # TODO: to refactor timeout = kwargs.pop("timeout", 100000) if not timeout: timeout = 100000 # timeout equals to 0 means no timeout, set the value to be a large number. @@ -28,7 +28,7 @@ async def wrapped_func(self, *args, **kwargs): self, timeout_time=timeout_time, last_exception=last_exception, **kwargs ) except Exception as exception: # pylint:disable=broad-except - last_exception = await self._handle_exception(exception, retry_count, max_retries, timeout_time) + last_exception = await self._handle_exception(exception, retry_count, max_retries, timeout_time) # pylint:disable=protected-access retry_count += 1 return wrapped_func @@ -58,7 +58,7 @@ async def _redirect(self, redirect): self.running = False await self._close_connection() - async def _open(self, timeout_time=None): + async def _open(self, timeout_time=None): # pylint:disable=unused-argument # TODO: to refactor """ Open the EventHubConsumer using the supplied connection. 
If the handler has previously been redirected, the redirect diff --git a/sdk/eventhub/azure-eventhubs/azure/eventhub/aio/producer_async.py b/sdk/eventhub/azure-eventhubs/azure/eventhub/aio/producer_async.py index 1e819aa12d74..dd05bc567137 100644 --- a/sdk/eventhub/azure-eventhubs/azure/eventhub/aio/producer_async.py +++ b/sdk/eventhub/azure-eventhubs/azure/eventhub/aio/producer_async.py @@ -98,7 +98,7 @@ def _create_handler(self): self.client.config.user_agent), loop=self.loop) - async def _open(self, timeout_time=None, **kwargs): + async def _open(self, timeout_time=None, **kwargs): # pylint:disable=arguments-differ, unused-argument # TODO: to refactor """ Open the EventHubProducer using the supplied connection. If the handler has previously been redirected, the redirect @@ -231,7 +231,7 @@ async def send(self, event_data, *, partition_key=None, timeout=None): wrapper_event_data = EventDataBatch._from_batch(event_data, partition_key) # pylint: disable=protected-access wrapper_event_data.message.on_send_complete = self._on_outcome self.unsent_events = [wrapper_event_data.message] - await self._send_event_data_with_retry(timeout=timeout) + await self._send_event_data_with_retry(timeout=timeout) # pylint:disable=unexpected-keyword-arg # TODO: to refactor async def close(self, exception=None): # type: (Exception) -> None diff --git a/sdk/eventhub/azure-eventhubs/azure/eventhub/producer.py b/sdk/eventhub/azure-eventhubs/azure/eventhub/producer.py index 1a1217957686..ad7e2e7b5f49 100644 --- a/sdk/eventhub/azure-eventhubs/azure/eventhub/producer.py +++ b/sdk/eventhub/azure-eventhubs/azure/eventhub/producer.py @@ -104,7 +104,7 @@ def _create_handler(self): link_properties=self._link_properties, properties=self.client._create_properties(self.client.config.user_agent)) # pylint: disable=protected-access - def _open(self, timeout_time=None, **kwargs): + def _open(self, timeout_time=None, **kwargs): # pylint:disable=unused-argument, arguments-differ # TODO:To refactor """ 
Open the EventHubProducer using the supplied connection. If the handler has previously been redirected, the redirect @@ -237,7 +237,7 @@ def send(self, event_data, partition_key=None, timeout=None): wrapper_event_data = EventDataBatch._from_batch(event_data, partition_key) # pylint: disable=protected-access wrapper_event_data.message.on_send_complete = self._on_outcome self.unsent_events = [wrapper_event_data.message] - self._send_event_data_with_retry(timeout=timeout) + self._send_event_data_with_retry(timeout=timeout) # pylint:disable=unexpected-keyword-arg # TODO:to refactor def close(self, exception=None): # pylint:disable=useless-super-delegation # type:(Exception) -> None From b85e6ccbbc4e23a74e4c96ef22d5290c3c9bf155 Mon Sep 17 00:00:00 2001 From: yijxie Date: Thu, 29 Aug 2019 16:30:27 -0700 Subject: [PATCH 19/51] fix pylint errors --- .../azure-eventhubs/azure/eventhub/aio/client_async.py | 7 ++++--- .../azure-eventhubs/azure/eventhub/aio/producer_async.py | 5 +++-- sdk/eventhub/azure-eventhubs/azure/eventhub/client.py | 2 +- .../azure-eventhubs/azure/eventhub/client_abstract.py | 4 ++-- 4 files changed, 10 insertions(+), 8 deletions(-) diff --git a/sdk/eventhub/azure-eventhubs/azure/eventhub/aio/client_async.py b/sdk/eventhub/azure-eventhubs/azure/eventhub/aio/client_async.py index d9b1be918bf4..1dc21f2741f8 100644 --- a/sdk/eventhub/azure-eventhubs/azure/eventhub/aio/client_async.py +++ b/sdk/eventhub/azure-eventhubs/azure/eventhub/aio/client_async.py @@ -44,9 +44,10 @@ class EventHubClient(EventHubClientAbstract): """ - def __init__(self, host, event_hub_path, credential, **kwargs): - # type:(str, str, Union[EventHubSharedKeyCredential, EventHubSASTokenCredential, TokenCredential], ...) 
-> None - + def __init__( + self, host: str, event_hub_path: str, + credential: Union[EventHubSharedKeyCredential, EventHubSASTokenCredential, TokenCredential], + **kwargs): super(EventHubClient, self).__init__(host=host, event_hub_path=event_hub_path, credential=credential, **kwargs) self._conn_manager = get_connection_manager(**kwargs) diff --git a/sdk/eventhub/azure-eventhubs/azure/eventhub/aio/producer_async.py b/sdk/eventhub/azure-eventhubs/azure/eventhub/aio/producer_async.py index dd05bc567137..866063dd48b6 100644 --- a/sdk/eventhub/azure-eventhubs/azure/eventhub/aio/producer_async.py +++ b/sdk/eventhub/azure-eventhubs/azure/eventhub/aio/producer_async.py @@ -184,8 +184,9 @@ async def create_batch(self, max_size=None, partition_key=None): return EventDataBatch(max_size=(max_size or self._max_message_size_on_link), partition_key=partition_key) - async def send(self, event_data, *, partition_key=None, timeout=None): - # type:(Union[EventData, EventDataBatch, Iterable[EventData]],Any, Union[str, bytes], float) -> None + async def send( + self, event_data:Union[EventData, EventDataBatch, Iterable[EventData]], + *, partition_key: Union[str, bytes] = None, timeout: float = None): """ Sends an event data and blocks until acknowledgement is received or operation times out. diff --git a/sdk/eventhub/azure-eventhubs/azure/eventhub/client.py b/sdk/eventhub/azure-eventhubs/azure/eventhub/client.py index deb07ce8dba7..600faaf31041 100644 --- a/sdk/eventhub/azure-eventhubs/azure/eventhub/client.py +++ b/sdk/eventhub/azure-eventhubs/azure/eventhub/client.py @@ -44,7 +44,7 @@ class EventHubClient(EventHubClientAbstract): """ def __init__(self, host, event_hub_path, credential, **kwargs): - # type:(str, str, Union[EventHubSharedKeyCredential, EventHubSASTokenCredential, TokenCredential], ...) 
-> None + # type:(str, str, Union[EventHubSharedKeyCredential, EventHubSASTokenCredential, TokenCredential], Any) -> None super(EventHubClient, self).__init__(host=host, event_hub_path=event_hub_path, credential=credential, **kwargs) self._conn_manager = get_connection_manager(**kwargs) diff --git a/sdk/eventhub/azure-eventhubs/azure/eventhub/client_abstract.py b/sdk/eventhub/azure-eventhubs/azure/eventhub/client_abstract.py index 068bd26810a8..1d33091755d0 100644 --- a/sdk/eventhub/azure-eventhubs/azure/eventhub/client_abstract.py +++ b/sdk/eventhub/azure-eventhubs/azure/eventhub/client_abstract.py @@ -10,7 +10,7 @@ import time import functools from abc import abstractmethod -from typing import Dict, Union, TYPE_CHECKING +from typing import Dict, Union, Any, TYPE_CHECKING from azure.eventhub import __version__ from azure.eventhub.configuration import _Configuration from .common import EventHubSharedKeyCredential, EventHubSASTokenCredential, _Address @@ -90,7 +90,7 @@ class EventHubClientAbstract(object): # pylint:disable=too-many-instance-attrib """ def __init__(self, host, event_hub_path, credential, **kwargs): - # type:(str, str, Union[EventHubSharedKeyCredential, EventHubSASTokenCredential, TokenCredential], ...) -> None + # type:(str, str, Union[EventHubSharedKeyCredential, EventHubSASTokenCredential, TokenCredential], Any) -> None """ Constructs a new EventHubClient. 
From 5e51ce27613d5df9907f5133f389e8cbbc0c25cc Mon Sep 17 00:00:00 2001 From: yijxie Date: Thu, 29 Aug 2019 17:15:37 -0700 Subject: [PATCH 20/51] fix pylint errors --- .../azure-eventhubs/azure/eventhub/aio/client_async.py | 6 ++---- .../azure-eventhubs/azure/eventhub/aio/producer_async.py | 2 +- 2 files changed, 3 insertions(+), 5 deletions(-) diff --git a/sdk/eventhub/azure-eventhubs/azure/eventhub/aio/client_async.py b/sdk/eventhub/azure-eventhubs/azure/eventhub/aio/client_async.py index 1dc21f2741f8..84853ce1534b 100644 --- a/sdk/eventhub/azure-eventhubs/azure/eventhub/aio/client_async.py +++ b/sdk/eventhub/azure-eventhubs/azure/eventhub/aio/client_async.py @@ -44,10 +44,8 @@ class EventHubClient(EventHubClientAbstract): """ - def __init__( - self, host: str, event_hub_path: str, - credential: Union[EventHubSharedKeyCredential, EventHubSASTokenCredential, TokenCredential], - **kwargs): + def __init__(self, host, event_hub_path, credential, **kwargs): + # type:(str, str, Union[EventHubSharedKeyCredential, EventHubSASTokenCredential, TokenCredential], Any) -> None super(EventHubClient, self).__init__(host=host, event_hub_path=event_hub_path, credential=credential, **kwargs) self._conn_manager = get_connection_manager(**kwargs) diff --git a/sdk/eventhub/azure-eventhubs/azure/eventhub/aio/producer_async.py b/sdk/eventhub/azure-eventhubs/azure/eventhub/aio/producer_async.py index 866063dd48b6..5cf04d26af28 100644 --- a/sdk/eventhub/azure-eventhubs/azure/eventhub/aio/producer_async.py +++ b/sdk/eventhub/azure-eventhubs/azure/eventhub/aio/producer_async.py @@ -185,7 +185,7 @@ async def create_batch(self, max_size=None, partition_key=None): return EventDataBatch(max_size=(max_size or self._max_message_size_on_link), partition_key=partition_key) async def send( - self, event_data:Union[EventData, EventDataBatch, Iterable[EventData]], + self, event_data: Union[EventData, EventDataBatch, Iterable[EventData]], *, partition_key: Union[str, bytes] = None, timeout: float = 
None): """ Sends an event data and blocks until acknowledgement is From 726bf6f0b3034295fa6891c8962f22468ac5efef Mon Sep 17 00:00:00 2001 From: yijxie Date: Thu, 29 Aug 2019 17:45:17 -0700 Subject: [PATCH 21/51] ignore eventprocessor pylint temporarily --- pylintrc | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/pylintrc b/pylintrc index 48e75db6ae1c..c5cddb6d9030 100644 --- a/pylintrc +++ b/pylintrc @@ -2,8 +2,8 @@ ignore-patterns=test_*,conftest,setup reports=no -# PYLINT DIRECTORY BLACKLIST -ignore=_generated,samples,examples,test,tests,doc,.tox +# PYLINT DIRECTORY BLACKLIST. Ignore eventprocessor temporarily until new eventprocessor code is merged to master +ignore=_generated,samples,examples,test,tests,doc,.tox,eventprocessor init-hook='import sys; sys.path.insert(0, os.path.abspath(os.getcwd().rsplit("azure-sdk-for-python", 1)[0] + "azure-sdk-for-python/scripts/pylint_custom_plugin"))' load-plugins=pylint_guidelines_checker From ffd8cb073d9887b4a4afcc8290697a16b5ebfae6 Mon Sep 17 00:00:00 2001 From: yijxie Date: Fri, 30 Aug 2019 09:41:22 -0700 Subject: [PATCH 22/51] small pylint adjustment --- sdk/eventhub/azure-eventhubs/azure/eventhub/common.py | 2 +- sdk/eventhub/azure-eventhubs/azure/eventhub/producer.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/sdk/eventhub/azure-eventhubs/azure/eventhub/common.py b/sdk/eventhub/azure-eventhubs/azure/eventhub/common.py index 56ea25863be3..3979463eef42 100644 --- a/sdk/eventhub/azure-eventhubs/azure/eventhub/common.py +++ b/sdk/eventhub/azure-eventhubs/azure/eventhub/common.py @@ -308,7 +308,7 @@ def try_add(self, event_data): raise TypeError('event_data should be type of EventData') if self._partition_key: - if event_data.partition_key and not event_data.partition_key == self._partition_key: + if event_data.partition_key and event_data.partition_key != self._partition_key: raise EventDataError('The partition_key of event_data does not match the one of the EventDataBatch') 
if not event_data.partition_key: event_data._set_partition_key(self._partition_key) # pylint:disable=protected-access diff --git a/sdk/eventhub/azure-eventhubs/azure/eventhub/producer.py b/sdk/eventhub/azure-eventhubs/azure/eventhub/producer.py index ad7e2e7b5f49..a36541475ceb 100644 --- a/sdk/eventhub/azure-eventhubs/azure/eventhub/producer.py +++ b/sdk/eventhub/azure-eventhubs/azure/eventhub/producer.py @@ -228,7 +228,7 @@ def send(self, event_data, partition_key=None, timeout=None): wrapper_event_data = event_data else: if isinstance(event_data, EventDataBatch): # The partition_key in the param will be omitted. - if partition_key and not partition_key == event_data._partition_key: # pylint: disable=protected-access + if partition_key and partition_key != event_data._partition_key: # pylint: disable=protected-access raise EventDataError('The partition_key does not match the one of the EventDataBatch') wrapper_event_data = event_data # type:ignore else: From e5c8d1c88a262ea6f809204bacb36bcde4ec7c2a Mon Sep 17 00:00:00 2001 From: yijxie Date: Fri, 30 Aug 2019 10:53:09 -0700 Subject: [PATCH 23/51] Add typing for Python2.7 --- sdk/eventhub/azure-eventhubs/setup.py | 1 + 1 file changed, 1 insertion(+) diff --git a/sdk/eventhub/azure-eventhubs/setup.py b/sdk/eventhub/azure-eventhubs/setup.py index 41b8e7c36800..6507991a8eb1 100644 --- a/sdk/eventhub/azure-eventhubs/setup.py +++ b/sdk/eventhub/azure-eventhubs/setup.py @@ -80,5 +80,6 @@ ], extras_require={ ":python_version<'3.0'": ['azure-nspkg'], + ":python_version<'3.5'": ["typing"], } ) From e85ac177e2fe174029995e337df91171faa2ba97 Mon Sep 17 00:00:00 2001 From: "Adam Ling (MSFT)" <47871814+yunhaoling@users.noreply.github.com> Date: Sun, 1 Sep 2019 17:34:33 -0700 Subject: [PATCH 24/51] [EventHub] IoTHub management operations improvement and bug fixing (#6894) * Fix bug that iothub hub can't receive * Support direct mgmt ops of iothub * Improve mgmt ops and update livetest * Small fix * Improvement of 
iothub mgmt --- .../eventhub/_consumer_producer_mixin.py | 2 +- .../azure/eventhub/aio/client_async.py | 27 +++++++- .../azure/eventhub/aio/consumer_async.py | 17 +++-- .../azure-eventhubs/azure/eventhub/client.py | 26 ++++++- .../azure/eventhub/client_abstract.py | 12 +++- .../azure/eventhub/consumer.py | 14 +++- .../asynctests/test_iothub_receive_async.py | 67 +++++++++++++++---- .../tests/test_iothub_receive.py | 52 ++++++++++++-- 8 files changed, 185 insertions(+), 32 deletions(-) diff --git a/sdk/eventhub/azure-eventhubs/azure/eventhub/_consumer_producer_mixin.py b/sdk/eventhub/azure-eventhubs/azure/eventhub/_consumer_producer_mixin.py index fd12b439d3d4..96479a6ef31b 100644 --- a/sdk/eventhub/azure-eventhubs/azure/eventhub/_consumer_producer_mixin.py +++ b/sdk/eventhub/azure-eventhubs/azure/eventhub/_consumer_producer_mixin.py @@ -73,7 +73,7 @@ def _open(self, timeout_time=None): # pylint:disable=unused-argument # TODO: to else: alt_creds = {} self._create_handler() - self._handler.open(connection=self.client._conn_manager.get_connection( + self._handler.open(connection=self.client._conn_manager.get_connection( # pylint: disable=protected-access self.client.address.hostname, self.client.get_auth(**alt_creds) )) diff --git a/sdk/eventhub/azure-eventhubs/azure/eventhub/aio/client_async.py b/sdk/eventhub/azure-eventhubs/azure/eventhub/aio/client_async.py index 84853ce1534b..00b136b8bed5 100644 --- a/sdk/eventhub/azure-eventhubs/azure/eventhub/aio/client_async.py +++ b/sdk/eventhub/azure-eventhubs/azure/eventhub/aio/client_async.py @@ -6,6 +6,7 @@ import datetime import functools import asyncio + from typing import Any, List, Dict, Union, TYPE_CHECKING from uamqp import authentication, constants # type: ignore @@ -47,6 +48,7 @@ class EventHubClient(EventHubClientAbstract): def __init__(self, host, event_hub_path, credential, **kwargs): # type:(str, str, Union[EventHubSharedKeyCredential, EventHubSASTokenCredential, TokenCredential], Any) -> None 
super(EventHubClient, self).__init__(host=host, event_hub_path=event_hub_path, credential=credential, **kwargs) + self._lock = asyncio.Lock() self._conn_manager = get_connection_manager(**kwargs) async def __aenter__(self): @@ -105,10 +107,17 @@ async def _close_connection(self): await self._conn_manager.reset_connection_if_broken() async def _management_request(self, mgmt_msg, op_type): + if self._is_iothub and not self._iothub_redirect_info: + await self._iothub_redirect() + + alt_creds = { + "username": self._auth_config.get("iot_username"), + "password": self._auth_config.get("iot_password") + } max_retries = self.config.max_retries retry_count = 0 while True: - mgmt_auth = self._create_auth() + mgmt_auth = self._create_auth(**alt_creds) mgmt_client = AMQPClientAsync(self.mgmt_target, auth=mgmt_auth, debug=self.config.network_tracing) try: conn = await self._conn_manager.get_connection(self.host, mgmt_auth) @@ -126,6 +135,18 @@ async def _management_request(self, mgmt_msg, op_type): finally: await mgmt_client.close_async() + async def _iothub_redirect(self): + async with self._lock: + if self._is_iothub and not self._iothub_redirect_info: + if not self._redirect_consumer: + self._redirect_consumer = self.create_consumer(consumer_group='$default', + partition_id='0', + event_position=EventPosition('-1'), + operation='/messages/events') + async with self._redirect_consumer: + await self._redirect_consumer._open_with_retry(timeout=self.config.receive_timeout) # pylint: disable=protected-access + self._redirect_consumer = None + async def get_properties(self): # type:() -> Dict[str, Any] """ @@ -139,6 +160,8 @@ async def get_properties(self): :rtype: dict :raises: ~azure.eventhub.ConnectError """ + if self._is_iothub and not self._iothub_redirect_info: + await self._iothub_redirect() mgmt_msg = Message(application_properties={'name': self.eh_name}) response = await self._management_request(mgmt_msg, op_type=b'com.microsoft:eventhub') output = {} @@ -178,6 +201,8 @@ 
async def get_partition_properties(self, partition): :rtype: dict :raises: ~azure.eventhub.ConnectError """ + if self._is_iothub and not self._iothub_redirect_info: + await self._iothub_redirect() mgmt_msg = Message(application_properties={'name': self.eh_name, 'partition': partition}) response = await self._management_request(mgmt_msg, op_type=b'com.microsoft:partition') diff --git a/sdk/eventhub/azure-eventhubs/azure/eventhub/aio/consumer_async.py b/sdk/eventhub/azure-eventhubs/azure/eventhub/aio/consumer_async.py index 3747d1af4d9d..49e85fe2e811 100644 --- a/sdk/eventhub/azure-eventhubs/azure/eventhub/aio/consumer_async.py +++ b/sdk/eventhub/azure-eventhubs/azure/eventhub/aio/consumer_async.py @@ -110,8 +110,10 @@ async def __anext__(self): def _create_handler(self): alt_creds = { - "username": self.client._auth_config.get("iot_username"), # pylint:disable=protected-access - "password": self.client._auth_config.get("iot_password")} # pylint:disable=protected-access + "username": self.client._auth_config.get("iot_username") if self.redirected else None, # pylint:disable=protected-access + "password": self.client._auth_config.get("iot_password") if self.redirected else None # pylint:disable=protected-access + } + source = Source(self.source) if self.offset is not None: source.set_filter(self.offset._selector()) # pylint:disable=protected-access @@ -134,7 +136,7 @@ async def _redirect(self, redirect): self.messages_iter = None await super(EventHubConsumer, self)._redirect(redirect) - async def _open(self, timeout_time=None): + async def _open(self, timeout_time=None, **kwargs): """ Open the EventHubConsumer using the supplied connection. 
If the handler has previously been redirected, the redirect @@ -142,11 +144,17 @@ async def _open(self, timeout_time=None): """ # pylint: disable=protected-access + self.redirected = self.redirected or self.client._iothub_redirect_info + if not self.running and self.redirected: self.client._process_redirect_uri(self.redirected) self.source = self.redirected.address await super(EventHubConsumer, self)._open(timeout_time) + @_retry_decorator + async def _open_with_retry(self, timeout_time=None, **kwargs): + return await self._open(timeout_time=timeout_time, **kwargs) + async def _receive(self, timeout_time=None, max_batch_size=None, **kwargs): last_exception = kwargs.get("last_exception") data_batch = kwargs.get("data_batch") @@ -254,4 +262,5 @@ async def close(self, exception=None): self.error = EventHubError(str(exception)) else: self.error = EventHubError("This receive handler is now closed.") - await self._handler.close_async() + if self._handler: + await self._handler.close_async() diff --git a/sdk/eventhub/azure-eventhubs/azure/eventhub/client.py b/sdk/eventhub/azure-eventhubs/azure/eventhub/client.py index 600faaf31041..f0e4eca2ac47 100644 --- a/sdk/eventhub/azure-eventhubs/azure/eventhub/client.py +++ b/sdk/eventhub/azure-eventhubs/azure/eventhub/client.py @@ -7,6 +7,8 @@ import logging import datetime import functools +import threading + from typing import Any, List, Dict, Union, TYPE_CHECKING import uamqp # type: ignore @@ -46,6 +48,7 @@ class EventHubClient(EventHubClientAbstract): def __init__(self, host, event_hub_path, credential, **kwargs): # type:(str, str, Union[EventHubSharedKeyCredential, EventHubSASTokenCredential, TokenCredential], Any) -> None super(EventHubClient, self).__init__(host=host, event_hub_path=event_hub_path, credential=credential, **kwargs) + self._lock = threading.RLock() self._conn_manager = get_connection_manager(**kwargs) def __enter__(self): @@ -106,10 +109,15 @@ def _close_connection(self): 
self._conn_manager.reset_connection_if_broken() def _management_request(self, mgmt_msg, op_type): + alt_creds = { + "username": self._auth_config.get("iot_username"), + "password": self._auth_config.get("iot_password") + } + max_retries = self.config.max_retries retry_count = 0 while retry_count <= self.config.max_retries: - mgmt_auth = self._create_auth() + mgmt_auth = self._create_auth(**alt_creds) mgmt_client = uamqp.AMQPClient(self.mgmt_target) try: conn = self._conn_manager.get_connection(self.host, mgmt_auth) #pylint:disable=assignment-from-none @@ -127,6 +135,18 @@ def _management_request(self, mgmt_msg, op_type): finally: mgmt_client.close() + def _iothub_redirect(self): + with self._lock: + if self._is_iothub and not self._iothub_redirect_info: + if not self._redirect_consumer: + self._redirect_consumer = self.create_consumer(consumer_group='$default', + partition_id='0', + event_position=EventPosition('-1'), + operation='/messages/events') + with self._redirect_consumer: + self._redirect_consumer._open_with_retry(timeout=self.config.receive_timeout) # pylint: disable=protected-access + self._redirect_consumer = None + def get_properties(self): # type:() -> Dict[str, Any] """ @@ -140,6 +160,8 @@ def get_properties(self): :rtype: dict :raises: ~azure.eventhub.ConnectError """ + if self._is_iothub and not self._iothub_redirect_info: + self._iothub_redirect() mgmt_msg = Message(application_properties={'name': self.eh_name}) response = self._management_request(mgmt_msg, op_type=b'com.microsoft:eventhub') output = {} @@ -179,6 +201,8 @@ def get_partition_properties(self, partition): :rtype: dict :raises: ~azure.eventhub.ConnectError """ + if self._is_iothub and not self._iothub_redirect_info: + self._iothub_redirect() mgmt_msg = Message(application_properties={'name': self.eh_name, 'partition': partition}) response = self._management_request(mgmt_msg, op_type=b'com.microsoft:partition') diff --git a/sdk/eventhub/azure-eventhubs/azure/eventhub/client_abstract.py 
b/sdk/eventhub/azure-eventhubs/azure/eventhub/client_abstract.py index 1d33091755d0..8b6712ce207e 100644 --- a/sdk/eventhub/azure-eventhubs/azure/eventhub/client_abstract.py +++ b/sdk/eventhub/azure-eventhubs/azure/eventhub/client_abstract.py @@ -11,7 +11,8 @@ import functools from abc import abstractmethod from typing import Dict, Union, Any, TYPE_CHECKING -from azure.eventhub import __version__ + +from azure.eventhub import __version__, EventPosition from azure.eventhub.configuration import _Configuration from .common import EventHubSharedKeyCredential, EventHubSASTokenCredential, _Address @@ -153,6 +154,8 @@ def __init__(self, host, event_hub_path, credential, **kwargs): self.get_auth = functools.partial(self._create_auth) self.config = _Configuration(**kwargs) self.debug = self.config.network_tracing + self._is_iothub = False + self._iothub_redirect_info = None log.info("%r: Created the Event Hub client", self.container_id) @@ -173,6 +176,11 @@ def _from_iothub_connection_string(cls, conn_str, **kwargs): 'iot_password': key, 'username': username, 'password': password} + client._is_iothub = True + client._redirect_consumer = client.create_consumer(consumer_group='$default', + partition_id='0', + event_position=EventPosition('-1'), + operation='/messages/events') return client @abstractmethod @@ -213,6 +221,8 @@ def _process_redirect_uri(self, redirect): self.auth_uri = "sb://{}{}".format(self.address.hostname, self.address.path) self.eh_name = self.address.path.lstrip('/') self.mgmt_target = redirect_uri + if self._is_iothub: + self._iothub_redirect_info = redirect @classmethod def from_connection_string(cls, conn_str, **kwargs): diff --git a/sdk/eventhub/azure-eventhubs/azure/eventhub/consumer.py b/sdk/eventhub/azure-eventhubs/azure/eventhub/consumer.py index 44be38386b73..6306c750260a 100644 --- a/sdk/eventhub/azure-eventhubs/azure/eventhub/consumer.py +++ b/sdk/eventhub/azure-eventhubs/azure/eventhub/consumer.py @@ -106,8 +106,10 @@ def __next__(self): def 
_create_handler(self): alt_creds = { - "username": self.client._auth_config.get("iot_username"), # pylint:disable=protected-access - "password": self.client._auth_config.get("iot_password")} # pylint:disable=protected-access + "username": self.client._auth_config.get("iot_username") if self.redirected else None, # pylint:disable=protected-access + "password": self.client._auth_config.get("iot_password") if self.redirected else None # pylint:disable=protected-access + } + source = Source(self.source) if self.offset is not None: source.set_filter(self.offset._selector()) # pylint:disable=protected-access @@ -129,7 +131,7 @@ def _redirect(self, redirect): self.messages_iter = None super(EventHubConsumer, self)._redirect(redirect) - def _open(self, timeout_time=None): + def _open(self, timeout_time=None, **kwargs): """ Open the EventHubConsumer using the supplied connection. If the handler has previously been redirected, the redirect @@ -137,11 +139,17 @@ def _open(self, timeout_time=None): """ # pylint: disable=protected-access + self.redirected = self.redirected or self.client._iothub_redirect_info + if not self.running and self.redirected: self.client._process_redirect_uri(self.redirected) self.source = self.redirected.address super(EventHubConsumer, self)._open(timeout_time) + @_retry_decorator + def _open_with_retry(self, timeout_time=None, **kwargs): + return self._open(timeout_time=timeout_time, **kwargs) + def _receive(self, timeout_time=None, max_batch_size=None, **kwargs): last_exception = kwargs.get("last_exception") data_batch = kwargs.get("data_batch") diff --git a/sdk/eventhub/azure-eventhubs/tests/asynctests/test_iothub_receive_async.py b/sdk/eventhub/azure-eventhubs/tests/asynctests/test_iothub_receive_async.py index f581f64584ab..9226a5d62a93 100644 --- a/sdk/eventhub/azure-eventhubs/tests/asynctests/test_iothub_receive_async.py +++ b/sdk/eventhub/azure-eventhubs/tests/asynctests/test_iothub_receive_async.py @@ -4,13 +4,11 @@ # license information. 
#-------------------------------------------------------------------------- -import os import asyncio import pytest -import time from azure.eventhub.aio import EventHubClient -from azure.eventhub import EventData, EventPosition, EventHubError +from azure.eventhub import EventPosition async def pump(receiver, sleep=None): @@ -18,25 +16,16 @@ async def pump(receiver, sleep=None): if sleep: await asyncio.sleep(sleep) async with receiver: - batch = await receiver.receive(timeout=1) + batch = await receiver.receive(timeout=3) messages += len(batch) return messages -async def get_partitions(iot_connection_str): - client = EventHubClient.from_connection_string(iot_connection_str, network_tracing=False) - receiver = client.create_consumer(consumer_group="$default", partition_id="0", event_position=EventPosition("-1"), prefetch=1000, operation='/messages/events') - async with receiver: - partitions = await client.get_properties() - return partitions["partition_ids"] - - @pytest.mark.liveTest @pytest.mark.asyncio async def test_iothub_receive_multiple_async(iot_connection_str): - pytest.skip("This will get AuthenticationError. 
We're investigating...") - partitions = await get_partitions(iot_connection_str) client = EventHubClient.from_connection_string(iot_connection_str, network_tracing=False) + partitions = await client.get_partition_ids() receivers = [] for p in partitions: receivers.append(client.create_consumer(consumer_group="$default", partition_id=p, event_position=EventPosition("-1"), prefetch=10, operation='/messages/events')) @@ -44,3 +33,53 @@ async def test_iothub_receive_multiple_async(iot_connection_str): assert isinstance(outputs[0], int) and outputs[0] <= 10 assert isinstance(outputs[1], int) and outputs[1] <= 10 + + +@pytest.mark.liveTest +@pytest.mark.asyncio +async def test_iothub_get_properties_async(iot_connection_str, device_id): + client = EventHubClient.from_connection_string(iot_connection_str, network_tracing=False) + properties = await client.get_properties() + assert properties["partition_ids"] == ["0", "1", "2", "3"] + + +@pytest.mark.liveTest +@pytest.mark.asyncio +async def test_iothub_get_partition_ids_async(iot_connection_str, device_id): + client = EventHubClient.from_connection_string(iot_connection_str, network_tracing=False) + partitions = await client.get_partition_ids() + assert partitions == ["0", "1", "2", "3"] + + +@pytest.mark.liveTest +@pytest.mark.asyncio +async def test_iothub_get_partition_properties_async(iot_connection_str, device_id): + client = EventHubClient.from_connection_string(iot_connection_str, network_tracing=False) + partition_properties = await client.get_partition_properties("0") + assert partition_properties["id"] == "0" + + +@pytest.mark.liveTest +@pytest.mark.asyncio +async def test_iothub_receive_after_mgmt_ops_async(iot_connection_str, device_id): + client = EventHubClient.from_connection_string(iot_connection_str, network_tracing=False) + partitions = await client.get_partition_ids() + assert partitions == ["0", "1", "2", "3"] + receiver = client.create_consumer(consumer_group="$default", partition_id=partitions[0], 
event_position=EventPosition("-1"), operation='/messages/events') + async with receiver: + received = await receiver.receive(timeout=5) + assert len(received) == 0 + + +@pytest.mark.liveTest +@pytest.mark.asyncio +async def test_iothub_mgmt_ops_after_receive_async(iot_connection_str, device_id): + client = EventHubClient.from_connection_string(iot_connection_str, network_tracing=False) + receiver = client.create_consumer(consumer_group="$default", partition_id="0", event_position=EventPosition("-1"), operation='/messages/events') + async with receiver: + received = await receiver.receive(timeout=5) + assert len(received) == 0 + + partitions = await client.get_partition_ids() + assert partitions == ["0", "1", "2", "3"] + diff --git a/sdk/eventhub/azure-eventhubs/tests/test_iothub_receive.py b/sdk/eventhub/azure-eventhubs/tests/test_iothub_receive.py index ac5787b6b12e..ac7e211bd736 100644 --- a/sdk/eventhub/azure-eventhubs/tests/test_iothub_receive.py +++ b/sdk/eventhub/azure-eventhubs/tests/test_iothub_receive.py @@ -4,23 +4,61 @@ # license information. 
#-------------------------------------------------------------------------- -import os import pytest -import time -from azure.eventhub import EventData, EventPosition, EventHubClient +from azure.eventhub import EventPosition, EventHubClient @pytest.mark.liveTest def test_iothub_receive_sync(iot_connection_str, device_id): - pytest.skip("current code will cause ErrorCodes.LinkRedirect") client = EventHubClient.from_connection_string(iot_connection_str, network_tracing=False) receiver = client.create_consumer(consumer_group="$default", partition_id="0", event_position=EventPosition("-1"), operation='/messages/events') - receiver._open() try: - partitions = client.get_properties() - assert partitions["partition_ids"] == ["0", "1", "2", "3"] received = receiver.receive(timeout=5) assert len(received) == 0 finally: receiver.close() + + +@pytest.mark.liveTest +def test_iothub_get_properties_sync(iot_connection_str, device_id): + client = EventHubClient.from_connection_string(iot_connection_str, network_tracing=False) + properties = client.get_properties() + assert properties["partition_ids"] == ["0", "1", "2", "3"] + + +@pytest.mark.liveTest +def test_iothub_get_partition_ids_sync(iot_connection_str, device_id): + client = EventHubClient.from_connection_string(iot_connection_str, network_tracing=False) + partitions = client.get_partition_ids() + assert partitions == ["0", "1", "2", "3"] + + +@pytest.mark.liveTest +def test_iothub_get_partition_properties_sync(iot_connection_str, device_id): + client = EventHubClient.from_connection_string(iot_connection_str, network_tracing=False) + partition_properties = client.get_partition_properties("0") + assert partition_properties["id"] == "0" + + +@pytest.mark.liveTest +def test_iothub_receive_after_mgmt_ops_sync(iot_connection_str, device_id): + client = EventHubClient.from_connection_string(iot_connection_str, network_tracing=False) + partitions = client.get_partition_ids() + assert partitions == ["0", "1", "2", "3"] + receiver 
= client.create_consumer(consumer_group="$default", partition_id=partitions[0], event_position=EventPosition("-1"), operation='/messages/events') + with receiver: + received = receiver.receive(timeout=5) + assert len(received) == 0 + + +@pytest.mark.liveTest +def test_iothub_mgmt_ops_after_receive_sync(iot_connection_str, device_id): + client = EventHubClient.from_connection_string(iot_connection_str, network_tracing=False) + receiver = client.create_consumer(consumer_group="$default", partition_id="0", event_position=EventPosition("-1"), operation='/messages/events') + with receiver: + received = receiver.receive(timeout=5) + assert len(received) == 0 + + partitions = client.get_partition_ids() + assert partitions == ["0", "1", "2", "3"] From 1fb341b6c203b6f73fa0a78cd85c87d0f66c4a43 Mon Sep 17 00:00:00 2001 From: "Adam Ling (MSFT)" <47871814+yunhaoling@users.noreply.github.com> Date: Mon, 2 Sep 2019 17:21:52 -0700 Subject: [PATCH 25/51] [EventHub] Retry refactor (#7026) * Retry refactor * Refactor retry, delay and handle exception * Remove unused module * Small fix * Small fix --- .../eventhub/_consumer_producer_mixin.py | 52 +++++++++++-------- .../aio/_consumer_producer_mixin_async.py | 52 ++++++++++--------- .../azure/eventhub/aio/client_async.py | 32 +++++++----- .../azure/eventhub/aio/consumer_async.py | 34 ++++++------ .../azure/eventhub/aio/error_async.py | 28 ++-------- .../azure/eventhub/aio/producer_async.py | 21 ++++---- .../azure-eventhubs/azure/eventhub/client.py | 28 ++++++---- .../azure/eventhub/consumer.py | 34 ++++++------ .../azure-eventhubs/azure/eventhub/error.py | 28 ++-------- .../azure/eventhub/producer.py | 23 ++++---- 10 files changed, 157 insertions(+), 175 deletions(-) diff --git a/sdk/eventhub/azure-eventhubs/azure/eventhub/_consumer_producer_mixin.py b/sdk/eventhub/azure-eventhubs/azure/eventhub/_consumer_producer_mixin.py index 96479a6ef31b..282a8c574088 100644 --- 
a/sdk/eventhub/azure-eventhubs/azure/eventhub/_consumer_producer_mixin.py +++ b/sdk/eventhub/azure-eventhubs/azure/eventhub/_consumer_producer_mixin.py @@ -13,24 +13,6 @@ log = logging.getLogger(__name__) -def _retry_decorator(to_be_wrapped_func): - def wrapped_func(self, *args, **kwargs): # pylint:disable=unused-argument # TODO: to refactor - timeout = kwargs.pop("timeout", 100000) - if not timeout: - timeout = 100000 # timeout equals to 0 means no timeout, set the value to be a large number. - timeout_time = time.time() + timeout - max_retries = self.client.config.max_retries - retry_count = 0 - last_exception = None - while True: - try: - return to_be_wrapped_func(self, timeout_time=timeout_time, last_exception=last_exception, **kwargs) - except Exception as exception: # pylint:disable=broad-except - last_exception = self._handle_exception(exception, retry_count, max_retries, timeout_time) # pylint:disable=protected-access - retry_count += 1 - return wrapped_func - - class ConsumerProducerMixin(object): def __init__(self): self.client = None @@ -55,9 +37,9 @@ def _redirect(self, redirect): self.running = False self._close_connection() - def _open(self, timeout_time=None): # pylint:disable=unused-argument # TODO: to refactor + def _open(self): """ - Open the EventHubConsumer using the supplied connection. + Open the EventHubConsumer/EventHubProducer using the supplied connection. If the handler has previously been redirected, the redirect context will be used to create a new handler before opening it. 
@@ -91,12 +73,36 @@ def _close_connection(self): self._close_handler() self.client._conn_manager.reset_connection_if_broken() # pylint: disable=protected-access - def _handle_exception(self, exception, retry_count, max_retries, timeout_time): + def _handle_exception(self, exception): if not self.running and isinstance(exception, compat.TimeoutException): exception = errors.AuthenticationException("Authorization timeout.") - return _handle_exception(exception, retry_count, max_retries, self, timeout_time) + return _handle_exception(exception, self) + + return _handle_exception(exception, self) + + def _do_retryable_operation(self, operation, timeout=None, **kwargs): + # pylint:disable=protected-access + if not timeout: + timeout = 100000 # timeout equals to 0 means no timeout, set the value to be a large number. + timeout_time = time.time() + timeout + retried_times = 0 + last_exception = kwargs.pop('last_exception', None) + operation_need_param = kwargs.pop('operation_need_param', True) + + while retried_times <= self.client.config.max_retries: + try: + if operation_need_param: + return operation(timeout_time=timeout_time, last_exception=last_exception, **kwargs) + else: + return operation() + except Exception as exception: # pylint:disable=broad-except + last_exception = self._handle_exception(exception) + self.client._try_delay(retried_times=retried_times, last_exception=last_exception, + timeout_time=timeout_time, entity_name=self.name) + retried_times += 1 - return _handle_exception(exception, retry_count, max_retries, self, timeout_time) + log.info("%r has exhausted retry. 
Exception still occurs (%r)", self.name, last_exception) + raise last_exception def close(self, exception=None): # type:(Exception) -> None diff --git a/sdk/eventhub/azure-eventhubs/azure/eventhub/aio/_consumer_producer_mixin_async.py b/sdk/eventhub/azure-eventhubs/azure/eventhub/aio/_consumer_producer_mixin_async.py index 53624c36f649..64873f843dc4 100644 --- a/sdk/eventhub/azure-eventhubs/azure/eventhub/aio/_consumer_producer_mixin_async.py +++ b/sdk/eventhub/azure-eventhubs/azure/eventhub/aio/_consumer_producer_mixin_async.py @@ -13,26 +13,6 @@ log = logging.getLogger(__name__) -def _retry_decorator(to_be_wrapped_func): - async def wrapped_func(self, *args, **kwargs): # pylint:disable=unused-argument # TODO: to refactor - timeout = kwargs.pop("timeout", 100000) - if not timeout: - timeout = 100000 # timeout equals to 0 means no timeout, set the value to be a large number. - timeout_time = time.time() + timeout - max_retries = self.client.config.max_retries - retry_count = 0 - last_exception = None - while True: - try: - return await to_be_wrapped_func( - self, timeout_time=timeout_time, last_exception=last_exception, **kwargs - ) - except Exception as exception: # pylint:disable=broad-except - last_exception = await self._handle_exception(exception, retry_count, max_retries, timeout_time) # pylint:disable=protected-access - retry_count += 1 - return wrapped_func - - class ConsumerProducerMixin(object): def __init__(self): @@ -58,7 +38,7 @@ async def _redirect(self, redirect): self.running = False await self._close_connection() - async def _open(self, timeout_time=None): # pylint:disable=unused-argument # TODO: to refactor + async def _open(self): """ Open the EventHubConsumer using the supplied connection. 
If the handler has previously been redirected, the redirect @@ -94,12 +74,36 @@ async def _close_connection(self): await self._close_handler() await self.client._conn_manager.reset_connection_if_broken() # pylint:disable=protected-access - async def _handle_exception(self, exception, retry_count, max_retries, timeout_time): + async def _handle_exception(self, exception): if not self.running and isinstance(exception, compat.TimeoutException): exception = errors.AuthenticationException("Authorization timeout.") - return await _handle_exception(exception, retry_count, max_retries, self, timeout_time) + return await _handle_exception(exception, self) + + return await _handle_exception(exception, self) + + async def _do_retryable_operation(self, operation, timeout=None, **kwargs): + # pylint:disable=protected-access + if not timeout: + timeout = 100000 # timeout equals to 0 means no timeout, set the value to be a large number. + timeout_time = time.time() + timeout + retried_times = 0 + last_exception = kwargs.pop('last_exception', None) + operation_need_param = kwargs.pop('operation_need_param', True) + + while retried_times <= self.client.config.max_retries: + try: + if operation_need_param: + return await operation(timeout_time=timeout_time, last_exception=last_exception, **kwargs) + else: + return await operation() + except Exception as exception: # pylint:disable=broad-except + last_exception = await self._handle_exception(exception) + await self.client._try_delay(retried_times=retried_times, last_exception=last_exception, + timeout_time=timeout_time, entity_name=self.name) + retried_times += 1 - return await _handle_exception(exception, retry_count, max_retries, self, timeout_time) + log.info("%r has exhausted retry. 
Exception still occurs (%r)", self.name, last_exception) + raise last_exception async def close(self, exception=None): # type: (Exception) -> None diff --git a/sdk/eventhub/azure-eventhubs/azure/eventhub/aio/client_async.py b/sdk/eventhub/azure-eventhubs/azure/eventhub/aio/client_async.py index 00b136b8bed5..10756da08701 100644 --- a/sdk/eventhub/azure-eventhubs/azure/eventhub/aio/client_async.py +++ b/sdk/eventhub/azure-eventhubs/azure/eventhub/aio/client_async.py @@ -4,6 +4,7 @@ # -------------------------------------------------------------------------------------------- import logging import datetime +import time import functools import asyncio @@ -100,23 +101,29 @@ def _create_auth(self, username=None, password=None): get_jwt_token, http_proxy=http_proxy, transport_type=transport_type) - async def _handle_exception(self, exception, retry_count, max_retries): - await _handle_exception(exception, retry_count, max_retries, self) - async def _close_connection(self): await self._conn_manager.reset_connection_if_broken() - async def _management_request(self, mgmt_msg, op_type): - if self._is_iothub and not self._iothub_redirect_info: - await self._iothub_redirect() + async def _try_delay(self, retried_times, last_exception, timeout_time=None, entity_name=None): + entity_name = entity_name or self.container_id + backoff = self.config.backoff_factor * 2 ** retried_times + if backoff <= self.config.backoff_max and ( + timeout_time is None or time.time() + backoff <= timeout_time): # pylint:disable=no-else-return + asyncio.sleep(backoff) + log.info("%r has an exception (%r). Retrying...", format(entity_name), last_exception) + else: + log.info("%r operation has timed out. 
Last exception before timeout is (%r)", + entity_name, last_exception) + raise last_exception + async def _management_request(self, mgmt_msg, op_type): alt_creds = { "username": self._auth_config.get("iot_username"), "password": self._auth_config.get("iot_password") } - max_retries = self.config.max_retries - retry_count = 0 - while True: + + retried_times = 0 + while retried_times <= self.config.max_retries: mgmt_auth = self._create_auth(**alt_creds) mgmt_client = AMQPClientAsync(self.mgmt_target, auth=mgmt_auth, debug=self.config.network_tracing) try: @@ -130,8 +137,9 @@ async def _management_request(self, mgmt_msg, op_type): description_fields=b'status-description') return response except Exception as exception: # pylint:disable=broad-except - await self._handle_exception(exception, retry_count, max_retries) - retry_count += 1 + last_exception = await _handle_exception(exception, self) + await self._try_delay(retried_times=retried_times, last_exception=last_exception) + retried_times += 1 finally: await mgmt_client.close_async() @@ -144,7 +152,7 @@ async def _iothub_redirect(self): event_position=EventPosition('-1'), operation='/messages/events') async with self._redirect_consumer: - await self._redirect_consumer._open_with_retry(timeout=self.config.receive_timeout) # pylint: disable=protected-access + await self._redirect_consumer._open_with_retry() # pylint: disable=protected-access self._redirect_consumer = None async def get_properties(self): diff --git a/sdk/eventhub/azure-eventhubs/azure/eventhub/aio/consumer_async.py b/sdk/eventhub/azure-eventhubs/azure/eventhub/aio/consumer_async.py index 49e85fe2e811..f26853e32cac 100644 --- a/sdk/eventhub/azure-eventhubs/azure/eventhub/aio/consumer_async.py +++ b/sdk/eventhub/azure-eventhubs/azure/eventhub/aio/consumer_async.py @@ -13,7 +13,7 @@ from azure.eventhub import EventData, EventPosition from azure.eventhub.error import EventHubError, ConnectError, _error_handler -from ._consumer_producer_mixin_async import 
ConsumerProducerMixin, _retry_decorator +from ._consumer_producer_mixin_async import ConsumerProducerMixin log = logging.getLogger(__name__) @@ -92,9 +92,8 @@ def __aiter__(self): return self async def __anext__(self): - max_retries = self.client.config.max_retries - retry_count = 0 - while True: + retried_times = 0 + while retried_times < self.client.config.max_retries: try: await self._open() if not self.messages_iter: @@ -102,11 +101,13 @@ async def __anext__(self): message = await self.messages_iter.__anext__() event_data = EventData._from_message(message) # pylint:disable=protected-access self.offset = EventPosition(event_data.offset, inclusive=False) - retry_count = 0 + retried_times = 0 return event_data except Exception as exception: # pylint:disable=broad-except - await self._handle_exception(exception, retry_count, max_retries, timeout_time=None) - retry_count += 1 + last_exception = await self._handle_exception(exception) + await self.client._try_delay(retried_times=retried_times, last_exception=last_exception, + entity_name=self.name) + retried_times += 1 def _create_handler(self): alt_creds = { @@ -136,7 +137,7 @@ async def _redirect(self, redirect): self.messages_iter = None await super(EventHubConsumer, self)._redirect(redirect) - async def _open(self, timeout_time=None, **kwargs): + async def _open(self): """ Open the EventHubConsumer using the supplied connection. 
If the handler has previously been redirected, the redirect @@ -149,17 +150,16 @@ async def _open(self, timeout_time=None, **kwargs): if not self.running and self.redirected: self.client._process_redirect_uri(self.redirected) self.source = self.redirected.address - await super(EventHubConsumer, self)._open(timeout_time) + await super(EventHubConsumer, self)._open() - @_retry_decorator - async def _open_with_retry(self, timeout_time=None, **kwargs): - return await self._open(timeout_time=timeout_time, **kwargs) + async def _open_with_retry(self): + return await self._do_retryable_operation(self._open, operation_need_param=False) async def _receive(self, timeout_time=None, max_batch_size=None, **kwargs): last_exception = kwargs.get("last_exception") data_batch = kwargs.get("data_batch") - await self._open(timeout_time) + await self._open() remaining_time = timeout_time - time.time() if remaining_time <= 0.0: if last_exception: @@ -177,9 +177,9 @@ async def _receive(self, timeout_time=None, max_batch_size=None, **kwargs): data_batch.append(event_data) return data_batch - @_retry_decorator - async def _receive_with_try(self, timeout_time=None, max_batch_size=None, **kwargs): - return await self._receive(timeout_time=timeout_time, max_batch_size=max_batch_size, **kwargs) + async def _receive_with_retry(self, timeout=None, max_batch_size=None, **kwargs): + return await self._do_retryable_operation(self._receive, timeout=timeout, + max_batch_size=max_batch_size, **kwargs) @property def queue_size(self): @@ -227,7 +227,7 @@ async def receive(self, *, max_batch_size=None, timeout=None): max_batch_size = max_batch_size or min(self.client.config.max_batch_size, self.prefetch) data_batch = [] # type: List[EventData] - return await self._receive_with_try(timeout=timeout, max_batch_size=max_batch_size, data_batch=data_batch) + return await self._receive_with_retry(timeout=timeout, max_batch_size=max_batch_size, data_batch=data_batch) async def close(self, exception=None): # 
type: (Exception) -> None diff --git a/sdk/eventhub/azure-eventhubs/azure/eventhub/aio/error_async.py b/sdk/eventhub/azure-eventhubs/azure/eventhub/aio/error_async.py index 5d0cff3ebc1d..58ecee91ad1b 100644 --- a/sdk/eventhub/azure-eventhubs/azure/eventhub/aio/error_async.py +++ b/sdk/eventhub/azure-eventhubs/azure/eventhub/aio/error_async.py @@ -36,7 +36,7 @@ def _create_eventhub_exception(exception): return error -async def _handle_exception(exception, retry_count, max_retries, closable, timeout_time=None): # pylint:disable=too-many-branches, too-many-statements +async def _handle_exception(exception, closable): # pylint:disable=too-many-branches, too-many-statements if isinstance(exception, asyncio.CancelledError): raise exception try: @@ -45,10 +45,10 @@ async def _handle_exception(exception, retry_count, max_retries, closable, timeo name = closable.container_id if isinstance(exception, KeyboardInterrupt): # pylint:disable=no-else-raise log.info("%r stops due to keyboard interrupt", name) - closable.close() + await closable.close() raise exception elif isinstance(exception, EventHubError): - closable.close() + await closable.close() raise exception elif isinstance(exception, ( errors.MessageAccepted, @@ -65,10 +65,6 @@ async def _handle_exception(exception, retry_count, max_retries, closable, timeo log.info("%r Event data send error (%r)", name, exception) error = EventDataSendError(str(exception), exception) raise error - elif retry_count >= max_retries: - error = _create_eventhub_exception(exception) - log.info("%r has exhausted retry. 
Exception still occurs (%r)", name, exception) - raise error else: if isinstance(exception, errors.AuthenticationException): if hasattr(closable, "_close_connection"): @@ -95,20 +91,4 @@ async def _handle_exception(exception, retry_count, max_retries, closable, timeo else: if hasattr(closable, "_close_connection"): await closable._close_connection() # pylint:disable=protected-access - # start processing retry delay - try: - backoff_factor = closable.client.config.backoff_factor - backoff_max = closable.client.config.backoff_max - except AttributeError: - backoff_factor = closable.config.backoff_factor - backoff_max = closable.config.backoff_max - backoff = backoff_factor * 2 ** retry_count - if backoff <= backoff_max and (timeout_time is None or time.time() + backoff <= timeout_time): # pylint:disable=no-else-return - await asyncio.sleep(backoff) - log.info("%r has an exception (%r). Retrying...", format(name), exception) - return _create_eventhub_exception(exception) - else: - error = _create_eventhub_exception(exception) - log.info("%r operation has timed out. 
Last exception before timeout is (%r)", name, error) - raise error - # end of processing retry delay + return _create_eventhub_exception(exception) diff --git a/sdk/eventhub/azure-eventhubs/azure/eventhub/aio/producer_async.py b/sdk/eventhub/azure-eventhubs/azure/eventhub/aio/producer_async.py index 5cf04d26af28..f9fb32420e81 100644 --- a/sdk/eventhub/azure-eventhubs/azure/eventhub/aio/producer_async.py +++ b/sdk/eventhub/azure-eventhubs/azure/eventhub/aio/producer_async.py @@ -14,8 +14,7 @@ from azure.eventhub.common import EventData, EventDataBatch from azure.eventhub.error import _error_handler, OperationTimeoutError, EventDataError from ..producer import _error, _set_partition_key -from ._consumer_producer_mixin_async import ConsumerProducerMixin, _retry_decorator - +from ._consumer_producer_mixin_async import ConsumerProducerMixin log = logging.getLogger(__name__) @@ -98,7 +97,7 @@ def _create_handler(self): self.client.config.user_agent), loop=self.loop) - async def _open(self, timeout_time=None, **kwargs): # pylint:disable=arguments-differ, unused-argument # TODO: to refactor + async def _open(self): """ Open the EventHubProducer using the supplied connection. 
If the handler has previously been redirected, the redirect @@ -108,15 +107,14 @@ async def _open(self, timeout_time=None, **kwargs): # pylint:disable=arguments- if not self.running and self.redirected: self.client._process_redirect_uri(self.redirected) # pylint: disable=protected-access self.target = self.redirected.address - await super(EventHubProducer, self)._open(timeout_time) + await super(EventHubProducer, self)._open() - @_retry_decorator - async def _open_with_retry(self, timeout_time=None, **kwargs): - return await self._open(timeout_time=timeout_time, **kwargs) + async def _open_with_retry(self): + return await self._do_retryable_operation(self._open, operation_need_param=False) async def _send_event_data(self, timeout_time=None, last_exception=None): if self.unsent_events: - await self._open(timeout_time) + await self._open() remaining_time = timeout_time - time.time() if remaining_time <= 0.0: if last_exception: @@ -135,9 +133,8 @@ async def _send_event_data(self, timeout_time=None, last_exception=None): _error(self._outcome, self._condition) return - @_retry_decorator - async def _send_event_data_with_retry(self, timeout_time=None, last_exception=None): - return await self._send_event_data(timeout_time=timeout_time, last_exception=last_exception) + async def _send_event_data_with_retry(self, timeout=None): + return await self._do_retryable_operation(self._send_event_data, timeout=timeout) def _on_outcome(self, outcome, condition): """ @@ -176,7 +173,7 @@ async def create_batch(self, max_size=None, partition_key=None): """ if not self._max_message_size_on_link: - await self._open_with_retry(timeout=self.client.config.send_timeout) + await self._open_with_retry() if max_size and max_size > self._max_message_size_on_link: raise ValueError('Max message size: {} is too large, acceptable max batch size is: {} bytes.' 
diff --git a/sdk/eventhub/azure-eventhubs/azure/eventhub/client.py b/sdk/eventhub/azure-eventhubs/azure/eventhub/client.py index f0e4eca2ac47..347b1263be2c 100644 --- a/sdk/eventhub/azure-eventhubs/azure/eventhub/client.py +++ b/sdk/eventhub/azure-eventhubs/azure/eventhub/client.py @@ -6,6 +6,7 @@ import logging import datetime +import time import functools import threading @@ -102,21 +103,29 @@ def _create_auth(self, username=None, password=None): get_jwt_token, http_proxy=http_proxy, transport_type=transport_type) - def _handle_exception(self, exception, retry_count, max_retries): - _handle_exception(exception, retry_count, max_retries, self) - def _close_connection(self): self._conn_manager.reset_connection_if_broken() + def _try_delay(self, retried_times, last_exception, timeout_time=None, entity_name=None): + entity_name = entity_name or self.container_id + backoff = self.config.backoff_factor * 2 ** retried_times + if backoff <= self.config.backoff_max and ( + timeout_time is None or time.time() + backoff <= timeout_time): # pylint:disable=no-else-return + time.sleep(backoff) + log.info("%r has an exception (%r). Retrying...", format(entity_name), last_exception) + else: + log.info("%r operation has timed out. 
Last exception before timeout is (%r)", + entity_name, last_exception) + raise last_exception + def _management_request(self, mgmt_msg, op_type): alt_creds = { "username": self._auth_config.get("iot_username"), "password": self._auth_config.get("iot_password") } - max_retries = self.config.max_retries - retry_count = 0 - while retry_count <= self.config.max_retries: + retried_times = 0 + while retried_times <= self.config.max_retries: mgmt_auth = self._create_auth(**alt_creds) mgmt_client = uamqp.AMQPClient(self.mgmt_target) try: @@ -130,8 +139,9 @@ def _management_request(self, mgmt_msg, op_type): description_fields=b'status-description') return response except Exception as exception: # pylint: disable=broad-except - self._handle_exception(exception, retry_count, max_retries) - retry_count += 1 + last_exception = _handle_exception(exception, self) + self._try_delay(retried_times=retried_times, last_exception=last_exception) + retried_times += 1 finally: mgmt_client.close() @@ -144,7 +154,7 @@ def _iothub_redirect(self): event_position=EventPosition('-1'), operation='/messages/events') with self._redirect_consumer: - self._redirect_consumer._open_with_retry(timeout=self.config.receive_timeout) # pylint: disable=protected-access + self._redirect_consumer._open_with_retry() # pylint: disable=protected-access self._redirect_consumer = None def get_properties(self): diff --git a/sdk/eventhub/azure-eventhubs/azure/eventhub/consumer.py b/sdk/eventhub/azure-eventhubs/azure/eventhub/consumer.py index 6306c750260a..82550bf3b9e5 100644 --- a/sdk/eventhub/azure-eventhubs/azure/eventhub/consumer.py +++ b/sdk/eventhub/azure-eventhubs/azure/eventhub/consumer.py @@ -14,7 +14,7 @@ from azure.eventhub.common import EventData, EventPosition from azure.eventhub.error import _error_handler -from ._consumer_producer_mixin import ConsumerProducerMixin, _retry_decorator +from ._consumer_producer_mixin import ConsumerProducerMixin log = logging.getLogger(__name__) @@ -88,9 +88,8 @@ def 
__iter__(self): return self def __next__(self): - max_retries = self.client.config.max_retries - retry_count = 0 - while True: + retried_times = 0 + while retried_times < self.client.config.max_retries: try: self._open() if not self.messages_iter: @@ -98,11 +97,13 @@ def __next__(self): message = next(self.messages_iter) event_data = EventData._from_message(message) # pylint:disable=protected-access self.offset = EventPosition(event_data.offset, inclusive=False) - retry_count = 0 + retried_times = 0 return event_data except Exception as exception: # pylint:disable=broad-except - self._handle_exception(exception, retry_count, max_retries, timeout_time=None) - retry_count += 1 + last_exception = self._handle_exception(exception) + self.client._try_delay(retried_times=retried_times, last_exception=last_exception, + entity_name=self.name) + retried_times += 1 def _create_handler(self): alt_creds = { @@ -131,7 +132,7 @@ def _redirect(self, redirect): self.messages_iter = None super(EventHubConsumer, self)._redirect(redirect) - def _open(self, timeout_time=None, **kwargs): + def _open(self): """ Open the EventHubConsumer using the supplied connection. 
If the handler has previously been redirected, the redirect @@ -144,17 +145,16 @@ def _open(self, timeout_time=None, **kwargs): if not self.running and self.redirected: self.client._process_redirect_uri(self.redirected) self.source = self.redirected.address - super(EventHubConsumer, self)._open(timeout_time) + super(EventHubConsumer, self)._open() - @_retry_decorator - def _open_with_retry(self, timeout_time=None, **kwargs): - return self._open(timeout_time=timeout_time, **kwargs) + def _open_with_retry(self): + return self._do_retryable_operation(self._open, operation_need_param=False) def _receive(self, timeout_time=None, max_batch_size=None, **kwargs): last_exception = kwargs.get("last_exception") data_batch = kwargs.get("data_batch") - self._open(timeout_time) + self._open() remaining_time = timeout_time - time.time() if remaining_time <= 0.0: if last_exception: @@ -171,9 +171,9 @@ def _receive(self, timeout_time=None, max_batch_size=None, **kwargs): data_batch.append(event_data) return data_batch - @_retry_decorator - def _receive_with_try(self, timeout_time=None, max_batch_size=None, **kwargs): - return self._receive(timeout_time=timeout_time, max_batch_size=max_batch_size, **kwargs) + def _receive_with_retry(self, timeout=None, max_batch_size=None, **kwargs): + return self._do_retryable_operation(self._receive, timeout=timeout, + max_batch_size=max_batch_size, **kwargs) @property def queue_size(self): @@ -221,7 +221,7 @@ def receive(self, max_batch_size=None, timeout=None): max_batch_size = max_batch_size or min(self.client.config.max_batch_size, self.prefetch) data_batch = [] # type: List[EventData] - return self._receive_with_try(timeout=timeout, max_batch_size=max_batch_size, data_batch=data_batch) + return self._receive_with_retry(timeout=timeout, max_batch_size=max_batch_size, data_batch=data_batch) def close(self, exception=None): # type:(Exception) -> None diff --git a/sdk/eventhub/azure-eventhubs/azure/eventhub/error.py 
b/sdk/eventhub/azure-eventhubs/azure/eventhub/error.py index 72b11f5478ad..6db54e5977de 100644 --- a/sdk/eventhub/azure-eventhubs/azure/eventhub/error.py +++ b/sdk/eventhub/azure-eventhubs/azure/eventhub/error.py @@ -157,10 +157,10 @@ def _create_eventhub_exception(exception): return error -def _handle_exception(exception, retry_count, max_retries, closable, timeout_time=None): # pylint:disable=too-many-branches, too-many-statements - try: +def _handle_exception(exception, closable): # pylint:disable=too-many-branches, too-many-statements + try: # closable is a producer/consumer object name = closable.name - except AttributeError: + except AttributeError: # closable is an client object name = closable.container_id if isinstance(exception, KeyboardInterrupt): # pylint:disable=no-else-raise log.info("%r stops due to keyboard interrupt", name) @@ -184,10 +184,6 @@ def _handle_exception(exception, retry_count, max_retries, closable, timeout_tim log.info("%r Event data send error (%r)", name, exception) error = EventDataSendError(str(exception), exception) raise error - elif retry_count >= max_retries: - error = _create_eventhub_exception(exception) - log.info("%r has exhausted retry. 
Exception still occurs (%r)", name, exception) - raise error else: if isinstance(exception, errors.AuthenticationException): if hasattr(closable, "_close_connection"): @@ -214,20 +210,4 @@ def _handle_exception(exception, retry_count, max_retries, closable, timeout_tim else: if hasattr(closable, "_close_connection"): closable._close_connection() # pylint:disable=protected-access - # start processing retry delay - try: - backoff_factor = closable.client.config.backoff_factor - backoff_max = closable.client.config.backoff_max - except AttributeError: - backoff_factor = closable.config.backoff_factor - backoff_max = closable.config.backoff_max - backoff = backoff_factor * 2 ** retry_count - if backoff <= backoff_max and (timeout_time is None or time.time() + backoff <= timeout_time): #pylint:disable=no-else-return - time.sleep(backoff) - log.info("%r has an exception (%r). Retrying...", format(name), exception) - return _create_eventhub_exception(exception) - else: - error = _create_eventhub_exception(exception) - log.info("%r operation has timed out. 
Last exception before timeout is (%r)", name, error) - raise error - # end of processing retry delay + return _create_eventhub_exception(exception) diff --git a/sdk/eventhub/azure-eventhubs/azure/eventhub/producer.py b/sdk/eventhub/azure-eventhubs/azure/eventhub/producer.py index a36541475ceb..c019a30ee7b8 100644 --- a/sdk/eventhub/azure-eventhubs/azure/eventhub/producer.py +++ b/sdk/eventhub/azure-eventhubs/azure/eventhub/producer.py @@ -14,7 +14,7 @@ from azure.eventhub.common import EventData, EventDataBatch from azure.eventhub.error import _error_handler, OperationTimeoutError, EventDataError -from ._consumer_producer_mixin import ConsumerProducerMixin, _retry_decorator +from ._consumer_producer_mixin import ConsumerProducerMixin log = logging.getLogger(__name__) @@ -104,26 +104,24 @@ def _create_handler(self): link_properties=self._link_properties, properties=self.client._create_properties(self.client.config.user_agent)) # pylint: disable=protected-access - def _open(self, timeout_time=None, **kwargs): # pylint:disable=unused-argument, arguments-differ # TODO:To refactor + def _open(self): """ Open the EventHubProducer using the supplied connection. If the handler has previously been redirected, the redirect context will be used to create a new handler before opening it. 
""" - if not self.running and self.redirected: self.client._process_redirect_uri(self.redirected) # pylint: disable=protected-access self.target = self.redirected.address - super(EventHubProducer, self)._open(timeout_time) + super(EventHubProducer, self)._open() - @_retry_decorator - def _open_with_retry(self, timeout_time=None, **kwargs): - return self._open(timeout_time=timeout_time, **kwargs) + def _open_with_retry(self): + return self._do_retryable_operation(self._open, operation_need_param=False) def _send_event_data(self, timeout_time=None, last_exception=None): if self.unsent_events: - self._open(timeout_time) + self._open() remaining_time = timeout_time - time.time() if remaining_time <= 0.0: if last_exception: @@ -141,9 +139,8 @@ def _send_event_data(self, timeout_time=None, last_exception=None): self._condition = OperationTimeoutError("send operation timed out") _error(self._outcome, self._condition) - @_retry_decorator - def _send_event_data_with_retry(self, timeout_time=None, last_exception=None): - return self._send_event_data(timeout_time=timeout_time, last_exception=last_exception) + def _send_event_data_with_retry(self, timeout=None): + return self._do_retryable_operation(self._send_event_data, timeout=timeout) def _on_outcome(self, outcome, condition): """ @@ -182,7 +179,7 @@ def create_batch(self, max_size=None, partition_key=None): """ if not self._max_message_size_on_link: - self._open_with_retry(timeout=self.client.config.send_timeout) + self._open_with_retry() if max_size and max_size > self._max_message_size_on_link: raise ValueError('Max message size: {} is too large, acceptable max batch size is: {} bytes.' 
@@ -237,7 +234,7 @@ def send(self, event_data, partition_key=None, timeout=None): wrapper_event_data = EventDataBatch._from_batch(event_data, partition_key) # pylint: disable=protected-access wrapper_event_data.message.on_send_complete = self._on_outcome self.unsent_events = [wrapper_event_data.message] - self._send_event_data_with_retry(timeout=timeout) # pylint:disable=unexpected-keyword-arg # TODO:to refactor + self._send_event_data_with_retry(timeout=timeout) def close(self, exception=None): # pylint:disable=useless-super-delegation # type:(Exception) -> None From 77621302c7e473979840af9b18f37f5f86efa24e Mon Sep 17 00:00:00 2001 From: yijxie Date: Tue, 3 Sep 2019 14:22:34 -0700 Subject: [PATCH 26/51] add system_properties to EventData --- sdk/eventhub/azure-eventhubs/azure/eventhub/common.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/sdk/eventhub/azure-eventhubs/azure/eventhub/common.py b/sdk/eventhub/azure-eventhubs/azure/eventhub/common.py index 3979463eef42..11668ba367f0 100644 --- a/sdk/eventhub/azure-eventhubs/azure/eventhub/common.py +++ b/sdk/eventhub/azure-eventhubs/azure/eventhub/common.py @@ -200,6 +200,10 @@ def application_properties(self, value): properties = None if value is None else dict(self._app_properties) self.message.application_properties = properties + @property + def system_properties(self): + return self._annotations + @property def body(self): """ From 1b10d00b8f803a63bad6d16c098bfcf4d9229111 Mon Sep 17 00:00:00 2001 From: yijxie Date: Tue, 3 Sep 2019 23:32:00 -0700 Subject: [PATCH 27/51] Fix a small bug --- sdk/eventhub/azure-eventhubs/azure/eventhub/aio/client_async.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/sdk/eventhub/azure-eventhubs/azure/eventhub/aio/client_async.py b/sdk/eventhub/azure-eventhubs/azure/eventhub/aio/client_async.py index 10756da08701..23aefaf2aa3e 100644 --- a/sdk/eventhub/azure-eventhubs/azure/eventhub/aio/client_async.py +++ 
b/sdk/eventhub/azure-eventhubs/azure/eventhub/aio/client_async.py @@ -109,7 +109,7 @@ async def _try_delay(self, retried_times, last_exception, timeout_time=None, ent backoff = self.config.backoff_factor * 2 ** retried_times if backoff <= self.config.backoff_max and ( timeout_time is None or time.time() + backoff <= timeout_time): # pylint:disable=no-else-return - asyncio.sleep(backoff) + await asyncio.sleep(backoff) log.info("%r has an exception (%r). Retrying...", format(entity_name), last_exception) else: log.info("%r operation has timed out. Last exception before timeout is (%r)", From 13237b51ad8197435a7fc5ae9e57cc0f89efac45 Mon Sep 17 00:00:00 2001 From: yijxie Date: Tue, 3 Sep 2019 23:46:04 -0700 Subject: [PATCH 28/51] Refine example code --- .../azure-eventhubs/examples/__init__.py | 17 ------- .../async_examples/iterator_receiver_async.py | 30 +++-------- .../examples/async_examples/recv_async.py | 37 +++++--------- .../{ => async_examples}/recv_owner_level.py | 33 +++++------- .../examples/async_examples/send_async.py | 44 ++++++---------- .../test_examples_eventhub_async.py | 5 -- .../azure-eventhubs/examples/batch_send.py | 50 ------------------ .../examples/client_secret_auth.py | 28 +++------- .../azure-eventhubs/examples/iothub_recv.py | 11 ++-- .../azure-eventhubs/examples/iothub_send.py | 15 ++---- .../azure-eventhubs/examples/proxy.py | 31 +++-------- sdk/eventhub/azure-eventhubs/examples/recv.py | 47 ++++++++--------- .../azure-eventhubs/examples/recv_batch.py | 44 +++++++--------- ...{iterator_receiver.py => recv_iterator.py} | 40 ++++----------- sdk/eventhub/azure-eventhubs/examples/send.py | 51 ++++++------------- ...data_batch.py => send_event_data_batch.py} | 45 +++++----------- .../examples/send_list_of_event_data.py | 33 ++++++++++++ .../examples/test_examples_eventhub.py | 5 -- 18 files changed, 181 insertions(+), 385 deletions(-) rename sdk/eventhub/azure-eventhubs/examples/{ => async_examples}/recv_owner_level.py (56%) delete mode 
100644 sdk/eventhub/azure-eventhubs/examples/batch_send.py rename sdk/eventhub/azure-eventhubs/examples/{iterator_receiver.py => recv_iterator.py} (51%) rename sdk/eventhub/azure-eventhubs/examples/{event_data_batch.py => send_event_data_batch.py} (50%) create mode 100644 sdk/eventhub/azure-eventhubs/examples/send_list_of_event_data.py diff --git a/sdk/eventhub/azure-eventhubs/examples/__init__.py b/sdk/eventhub/azure-eventhubs/examples/__init__.py index 94facc8618df..34913fb394d7 100644 --- a/sdk/eventhub/azure-eventhubs/examples/__init__.py +++ b/sdk/eventhub/azure-eventhubs/examples/__init__.py @@ -2,20 +2,3 @@ # Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. See License.txt in the project root for license information. # -------------------------------------------------------------------------------------------- - -import sys -import logging - -def get_logger(level): - azure_logger = logging.getLogger("azure.eventhub") - azure_logger.setLevel(level) - handler = logging.StreamHandler(stream=sys.stdout) - handler.setFormatter(logging.Formatter('%(asctime)s %(name)-12s %(levelname)-8s %(message)s')) - if not azure_logger.handlers: - azure_logger.addHandler(handler) - - uamqp_logger = logging.getLogger("uamqp") - uamqp_logger.setLevel(logging.INFO) - if not uamqp_logger.handlers: - uamqp_logger.addHandler(handler) - return azure_logger diff --git a/sdk/eventhub/azure-eventhubs/examples/async_examples/iterator_receiver_async.py b/sdk/eventhub/azure-eventhubs/examples/async_examples/iterator_receiver_async.py index fb726d854b8c..53e73228032e 100644 --- a/sdk/eventhub/azure-eventhubs/examples/async_examples/iterator_receiver_async.py +++ b/sdk/eventhub/azure-eventhubs/examples/async_examples/iterator_receiver_async.py @@ -10,41 +10,27 @@ """ import os -import time -import logging import asyncio from azure.eventhub.aio import EventHubClient -from azure.eventhub import EventPosition, EventHubSharedKeyCredential, EventData 
+from azure.eventhub import EventPosition, EventHubSharedKeyCredential -import examples -logger = examples.get_logger(logging.INFO) - - -HOSTNAME = os.environ.get('EVENT_HUB_HOSTNAME') # .servicebus.windows.net -EVENT_HUB = os.environ.get('EVENT_HUB_NAME') - -USER = os.environ.get('EVENT_HUB_SAS_POLICY') -KEY = os.environ.get('EVENT_HUB_SAS_KEY') +HOSTNAME = os.environ['EVENT_HUB_HOSTNAME'] # .servicebus.windows.net +EVENT_HUB = os.environ['EVENT_HUB_NAME'] +USER = os.environ['EVENT_HUB_SAS_POLICY'] +KEY = os.environ['EVENT_HUB_SAS_KEY'] EVENT_POSITION = EventPosition("-1") -async def iter_consumer(consumer): - async with consumer: - async for item in consumer: - print(item) - - async def main(): - if not HOSTNAME: - raise ValueError("No EventHubs URL supplied.") client = EventHubClient(host=HOSTNAME, event_hub_path=EVENT_HUB, credential=EventHubSharedKeyCredential(USER, KEY), network_tracing=False) consumer = client.create_consumer(consumer_group="$default", partition_id="0", event_position=EVENT_POSITION) - await iter_consumer(consumer) + async with consumer: + async for item in consumer: + print(item) if __name__ == '__main__': loop = asyncio.get_event_loop() loop.run_until_complete(main()) - diff --git a/sdk/eventhub/azure-eventhubs/examples/async_examples/recv_async.py b/sdk/eventhub/azure-eventhubs/examples/async_examples/recv_async.py index 9f59c0ea7ab6..ba6bf68a9258 100644 --- a/sdk/eventhub/azure-eventhubs/examples/async_examples/recv_async.py +++ b/sdk/eventhub/azure-eventhubs/examples/async_examples/recv_async.py @@ -11,21 +11,15 @@ import os import time -import logging import asyncio from azure.eventhub.aio import EventHubClient from azure.eventhub import EventPosition, EventHubSharedKeyCredential -import examples -logger = examples.get_logger(logging.INFO) - - -HOSTNAME = os.environ.get('EVENT_HUB_HOSTNAME') # .servicebus.windows.net -EVENT_HUB = os.environ.get('EVENT_HUB_NAME') - -USER = os.environ.get('EVENT_HUB_SAS_POLICY') -KEY = 
os.environ.get('EVENT_HUB_SAS_KEY') +HOSTNAME = os.environ['EVENT_HUB_HOSTNAME'] # .servicebus.windows.net +EVENT_HUB = os.environ['EVENT_HUB_NAME'] +USER = os.environ['EVENT_HUB_SAS_POLICY'] +KEY = os.environ['EVENT_HUB_SAS_KEY'] EVENT_POSITION = EventPosition("-1") @@ -44,18 +38,11 @@ async def pump(client, partition): run_time = end_time - start_time print("Received {} messages in {} seconds".format(total, run_time)) -try: - if not HOSTNAME: - raise ValueError("No EventHubs URL supplied.") - - loop = asyncio.get_event_loop() - client = EventHubClient(host=HOSTNAME, event_hub_path=EVENT_HUB, credential=EventHubSharedKeyCredential(USER, KEY), - network_tracing=False) - tasks = [ - asyncio.ensure_future(pump(client, "0")), - asyncio.ensure_future(pump(client, "1"))] - loop.run_until_complete(asyncio.wait(tasks)) - loop.close() - -except KeyboardInterrupt: - pass + +loop = asyncio.get_event_loop() +client = EventHubClient(host=HOSTNAME, event_hub_path=EVENT_HUB, credential=EventHubSharedKeyCredential(USER, KEY), + network_tracing=False) +tasks = [ + asyncio.ensure_future(pump(client, "0")), + asyncio.ensure_future(pump(client, "1"))] +loop.run_until_complete(asyncio.wait(tasks)) diff --git a/sdk/eventhub/azure-eventhubs/examples/recv_owner_level.py b/sdk/eventhub/azure-eventhubs/examples/async_examples/recv_owner_level.py similarity index 56% rename from sdk/eventhub/azure-eventhubs/examples/recv_owner_level.py rename to sdk/eventhub/azure-eventhubs/examples/async_examples/recv_owner_level.py index 4217874771ad..384d914ad436 100644 --- a/sdk/eventhub/azure-eventhubs/examples/recv_owner_level.py +++ b/sdk/eventhub/azure-eventhubs/examples/async_examples/recv_owner_level.py @@ -11,46 +11,37 @@ import os import time -import logging import asyncio from azure.eventhub.aio import EventHubClient from azure.eventhub import EventHubSharedKeyCredential, EventPosition -import examples -logger = examples.get_logger(logging.INFO) +HOSTNAME = os.environ['EVENT_HUB_HOSTNAME'] # 
.servicebus.windows.net +EVENT_HUB = os.environ['EVENT_HUB_NAME'] -HOSTNAME = os.environ.get('EVENT_HUB_HOSTNAME') # .servicebus.windows.net -EVENT_HUB = os.environ.get('EVENT_HUB_NAME') - -USER = os.environ.get('EVENT_HUB_SAS_POLICY') -KEY = os.environ.get('EVENT_HUB_SAS_KEY') +USER = os.environ['EVENT_HUB_SAS_POLICY'] +KEY = os.environ['EVENT_HUB_SAS_KEY'] PARTITION = "0" async def pump(client, owner_level): - consumer = client.create_consumer(consumer_group="$default", partition_id=PARTITION, event_position=EventPosition("-1"), owner_level=owner_level) + consumer = client.create_consumer( + consumer_group="$default", partition_id=PARTITION, event_position=EventPosition("-1"), owner_level=owner_level + ) async with consumer: total = 0 start_time = time.time() for event_data in await consumer.receive(timeout=5): last_offset = event_data.offset last_sn = event_data.sequence_number + print("Received: {}, {}".format(last_offset, last_sn)) total += 1 end_time = time.time() run_time = end_time - start_time print("Received {} messages in {} seconds".format(total, run_time)) -try: - if not HOSTNAME: - raise ValueError("No EventHubs URL supplied.") - - loop = asyncio.get_event_loop() - client = EventHubClient(host=HOSTNAME, event_hub_path=EVENT_HUB, credential=EventHubSharedKeyCredential(USER, KEY), - network_tracing=False) - loop.run_until_complete(pump(client, 20)) - loop.close() - -except KeyboardInterrupt: - pass +loop = asyncio.get_event_loop() +client = EventHubClient(host=HOSTNAME, event_hub_path=EVENT_HUB, credential=EventHubSharedKeyCredential(USER, KEY), + network_tracing=False) +loop.run_until_complete(pump(client, 20)) diff --git a/sdk/eventhub/azure-eventhubs/examples/async_examples/send_async.py b/sdk/eventhub/azure-eventhubs/examples/async_examples/send_async.py index ffae6787628d..ac9ad098fee5 100644 --- a/sdk/eventhub/azure-eventhubs/examples/async_examples/send_async.py +++ b/sdk/eventhub/azure-eventhubs/examples/async_examples/send_async.py @@ -11,7 
+11,6 @@ # pylint: disable=C0111 -import logging import time import asyncio import os @@ -19,14 +18,11 @@ from azure.eventhub.aio import EventHubClient from azure.eventhub import EventData, EventHubSharedKeyCredential -import examples -logger = examples.get_logger(logging.INFO) +HOSTNAME = os.environ['EVENT_HUB_HOSTNAME'] # .servicebus.windows.net +EVENT_HUB = os.environ['EVENT_HUB_NAME'] -HOSTNAME = os.environ.get('EVENT_HUB_HOSTNAME') # .servicebus.windows.net -EVENT_HUB = os.environ.get('EVENT_HUB_NAME') - -USER = os.environ.get('EVENT_HUB_SAS_POLICY') -KEY = os.environ.get('EVENT_HUB_SAS_KEY') +USER = os.environ['EVENT_HUB_SAS_POLICY'] +KEY = os.environ['EVENT_HUB_SAS_KEY'] async def run(client): @@ -37,26 +33,18 @@ async def run(client): async def send(producer, count): async with producer: for i in range(count): - logger.info("Sending message: {}".format(i)) + print("Sending message: {}".format(i)) data = EventData(str(i)) await producer.send(data) -try: - if not HOSTNAME: - raise ValueError("No EventHubs URL supplied.") - - loop = asyncio.get_event_loop() - client = EventHubClient(host=HOSTNAME, event_hub_path=EVENT_HUB, credential=EventHubSharedKeyCredential(USER, KEY), - network_tracing=False) - tasks = asyncio.gather( - run(client), - run(client)) - start_time = time.time() - loop.run_until_complete(tasks) - end_time = time.time() - run_time = end_time - start_time - logger.info("Runtime: {} seconds".format(run_time)) - loop.close() - -except KeyboardInterrupt: - pass +loop = asyncio.get_event_loop() +client = EventHubClient(host=HOSTNAME, event_hub_path=EVENT_HUB, credential=EventHubSharedKeyCredential(USER, KEY), + network_tracing=False) +tasks = asyncio.gather( + run(client), + run(client)) +start_time = time.time() +loop.run_until_complete(tasks) +end_time = time.time() +run_time = end_time - start_time +print("Runtime: {} seconds".format(run_time)) diff --git a/sdk/eventhub/azure-eventhubs/examples/async_examples/test_examples_eventhub_async.py 
b/sdk/eventhub/azure-eventhubs/examples/async_examples/test_examples_eventhub_async.py index 896f2a007b21..eeb20b3594af 100644 --- a/sdk/eventhub/azure-eventhubs/examples/async_examples/test_examples_eventhub_async.py +++ b/sdk/eventhub/azure-eventhubs/examples/async_examples/test_examples_eventhub_async.py @@ -5,14 +5,9 @@ #-------------------------------------------------------------------------- import pytest -import datetime -import os -import time import logging import asyncio -from azure.eventhub import EventHubError, EventData - @pytest.mark.asyncio async def test_example_eventhub_async_send_and_receive(live_eventhub_config): diff --git a/sdk/eventhub/azure-eventhubs/examples/batch_send.py b/sdk/eventhub/azure-eventhubs/examples/batch_send.py deleted file mode 100644 index 3801682f7914..000000000000 --- a/sdk/eventhub/azure-eventhubs/examples/batch_send.py +++ /dev/null @@ -1,50 +0,0 @@ -#!/usr/bin/env python - -# -------------------------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for license information. -# -------------------------------------------------------------------------------------------- - -""" -An example to show batch sending events to an Event Hub. 
-""" - -# pylint: disable=C0111 - -import logging -import time -import os - -from azure.eventhub import EventData, EventHubClient, EventHubSharedKeyCredential - - -import examples -logger = examples.get_logger(logging.INFO) - -HOSTNAME = os.environ.get('EVENT_HUB_HOSTNAME') # .servicebus.windows.net -EVENT_HUB = os.environ.get('EVENT_HUB_NAME') - -USER = os.environ.get('EVENT_HUB_SAS_POLICY') -KEY = os.environ.get('EVENT_HUB_SAS_KEY') - - -try: - if not HOSTNAME: - raise ValueError("No EventHubs URL supplied.") - - client = EventHubClient(host=HOSTNAME, event_hub_path=EVENT_HUB, credential=EventHubSharedKeyCredential(USER, KEY), network_tracing=False) - producer = client.create_producer(partition_id="1") - - event_list = [] - for i in range(1500): - event_list.append(EventData('Hello World')) - - with producer: - start_time = time.time() - producer.send(event_list) - end_time = time.time() - run_time = end_time - start_time - logger.info("Runtime: {} seconds".format(run_time)) - -except KeyboardInterrupt: - pass diff --git a/sdk/eventhub/azure-eventhubs/examples/client_secret_auth.py b/sdk/eventhub/azure-eventhubs/examples/client_secret_auth.py index 6c3202162872..079469b89518 100644 --- a/sdk/eventhub/azure-eventhubs/examples/client_secret_auth.py +++ b/sdk/eventhub/azure-eventhubs/examples/client_secret_auth.py @@ -9,22 +9,16 @@ """ import os -import time -import logging - from azure.eventhub import EventHubClient from azure.eventhub import EventData from azure.identity import ClientSecretCredential -import examples -logger = examples.get_logger(logging.INFO) - -HOSTNAME = os.environ.get('EVENT_HUB_HOSTNAME') # .servicebus.windows.net -EVENT_HUB = os.environ.get('EVENT_HUB_NAME') +HOSTNAME = os.environ['EVENT_HUB_HOSTNAME'] # .servicebus.windows.net +EVENT_HUB = os.environ['EVENT_HUB_NAME'] -USER = os.environ.get('EVENT_HUB_SAS_POLICY') -KEY = os.environ.get('EVENT_HUB_SAS_KEY') +USER = os.environ['EVENT_HUB_SAS_POLICY'] +KEY = os.environ['EVENT_HUB_SAS_KEY'] 
CLIENT_ID = os.environ.get('AAD_CLIENT_ID') SECRET = os.environ.get('AAD_SECRET') @@ -35,14 +29,8 @@ client = EventHubClient(host=HOSTNAME, event_hub_path=EVENT_HUB, credential=credential) -try: - producer = client.create_producer(partition_id='0') - - with producer: - event = EventData(body='A single message') - producer.send(event) -except KeyboardInterrupt: - pass -except Exception as e: - print(e) +producer = client.create_producer(partition_id='0') +with producer: + event = EventData(body='A single message') + producer.send(event) diff --git a/sdk/eventhub/azure-eventhubs/examples/iothub_recv.py b/sdk/eventhub/azure-eventhubs/examples/iothub_recv.py index 08a1c5af32ad..0542f9c82b9c 100644 --- a/sdk/eventhub/azure-eventhubs/examples/iothub_recv.py +++ b/sdk/eventhub/azure-eventhubs/examples/iothub_recv.py @@ -9,20 +9,17 @@ An example to show receiving events from an IoT Hub partition. """ import os -import logging from azure.eventhub import EventHubClient, EventPosition - -logger = logging.getLogger('azure.eventhub') - iot_connection_str = os.environ['IOTHUB_CONNECTION_STR'] client = EventHubClient.from_connection_string(iot_connection_str, network_tracing=False) -consumer = client.create_consumer(consumer_group="$default", partition_id="0", event_position=EventPosition("-1"), operation='/messages/events') +consumer = client.create_consumer(consumer_group="$default", partition_id="0", event_position=EventPosition("-1"), + operation='/messages/events') with consumer: received = consumer.receive(timeout=5) print(received) - eh_info = client.get_properties() - print(eh_info) +eh_info = client.get_properties() +print(eh_info) diff --git a/sdk/eventhub/azure-eventhubs/examples/iothub_send.py b/sdk/eventhub/azure-eventhubs/examples/iothub_send.py index 06d35b102647..c2f8f3379259 100644 --- a/sdk/eventhub/azure-eventhubs/examples/iothub_send.py +++ b/sdk/eventhub/azure-eventhubs/examples/iothub_send.py @@ -9,21 +9,12 @@ An example to show receiving events from an IoT 
Hub partition. """ import os -import logging - from azure.eventhub import EventData, EventHubClient - -logger = logging.getLogger('azure.eventhub') - iot_device_id = os.environ['IOTHUB_DEVICE'] iot_connection_str = os.environ['IOTHUB_CONNECTION_STR'] client = EventHubClient.from_connection_string(iot_connection_str, network_tracing=False) -try: - producer = client.create_producer(operation='/messages/devicebound') - with producer: - producer.send(EventData(b"A single event", to_device=iot_device_id)) - -except KeyboardInterrupt: - pass +producer = client.create_producer(operation='/messages/devicebound') +with producer: + producer.send(EventData(b"A single event", to_device=iot_device_id)) diff --git a/sdk/eventhub/azure-eventhubs/examples/proxy.py b/sdk/eventhub/azure-eventhubs/examples/proxy.py index 0af2dda1a6ac..61005c347333 100644 --- a/sdk/eventhub/azure-eventhubs/examples/proxy.py +++ b/sdk/eventhub/azure-eventhubs/examples/proxy.py @@ -9,20 +9,15 @@ An example to show sending and receiving events behind a proxy """ import os -import logging - from azure.eventhub import EventHubClient, EventPosition, EventData, EventHubSharedKeyCredential -import examples -logger = examples.get_logger(logging.INFO) - # Hostname can be .servicebus.windows.net" -HOSTNAME = os.environ.get('EVENT_HUB_HOSTNAME') -EVENT_HUB = os.environ.get('EVENT_HUB_NAME') +HOSTNAME = os.environ['EVENT_HUB_HOSTNAME'] +EVENT_HUB = os.environ['EVENT_HUB_NAME'] -USER = os.environ.get('EVENT_HUB_SAS_POLICY') -KEY = os.environ.get('EVENT_HUB_SAS_KEY') +USER = os.environ['EVENT_HUB_SAS_POLICY'] +KEY = os.environ['EVENT_HUB_SAS_KEY'] EVENT_POSITION = EventPosition("-1") PARTITION = "0" @@ -33,29 +28,19 @@ 'password': '123456' # password used for proxy authentication if needed } - -if not HOSTNAME: - raise ValueError("No EventHubs URL supplied.") - -client = EventHubClient(host=HOSTNAME, event_hub_path=EVENT_HUB, credential=EventHubSharedKeyCredential(USER, KEY), network_tracing=False, 
http_proxy=HTTP_PROXY) +client = EventHubClient(host=HOSTNAME, event_hub_path=EVENT_HUB, credential=EventHubSharedKeyCredential(USER, KEY), + network_tracing=False, http_proxy=HTTP_PROXY) +producer = client.create_producer(partition_id=PARTITION) +consumer = client.create_consumer(consumer_group="$default", partition_id=PARTITION, event_position=EVENT_POSITION) try: - producer = client.create_producer(partition_id=PARTITION) - consumer = client.create_consumer(consumer_group="$default", partition_id=PARTITION, event_position=EVENT_POSITION) - consumer.receive(timeout=1) - event_list = [] for i in range(20): event_list.append(EventData("Event Number {}".format(i))) - print('Start sending events behind a proxy.') - producer.send(event_list) - print('Start receiving events behind a proxy.') - received = consumer.receive(max_batch_size=50, timeout=5) finally: producer.close() consumer.close() - diff --git a/sdk/eventhub/azure-eventhubs/examples/recv.py b/sdk/eventhub/azure-eventhubs/examples/recv.py index 4f64b40e095c..68f0b5c214e8 100644 --- a/sdk/eventhub/azure-eventhubs/examples/recv.py +++ b/sdk/eventhub/azure-eventhubs/examples/recv.py @@ -9,18 +9,14 @@ An example to show receiving events from an Event Hub partition. 
""" import os -import logging import time from azure.eventhub import EventHubClient, EventPosition, EventHubSharedKeyCredential -import examples -logger = examples.get_logger(logging.INFO) +HOSTNAME = os.environ['EVENT_HUB_HOSTNAME'] # .servicebus.windows.net +EVENT_HUB = os.environ['EVENT_HUB_NAME'] -HOSTNAME = os.environ.get('EVENT_HUB_HOSTNAME') # .servicebus.windows.net -EVENT_HUB = os.environ.get('EVENT_HUB_NAME') - -USER = os.environ.get('EVENT_HUB_SAS_POLICY') -KEY = os.environ.get('EVENT_HUB_SAS_KEY') +USER = os.environ['EVENT_HUB_SAS_POLICY'] +KEY = os.environ['EVENT_HUB_SAS_KEY'] EVENT_POSITION = EventPosition("-1") PARTITION = "0" @@ -32,23 +28,20 @@ client = EventHubClient(host=HOSTNAME, event_hub_path=EVENT_HUB, credential=EventHubSharedKeyCredential(USER, KEY), network_tracing=False) -try: - consumer = client.create_consumer(consumer_group="$default", partition_id=PARTITION, event_position=EVENT_POSITION, prefetch=5000) - with consumer: - start_time = time.time() +consumer = client.create_consumer(consumer_group="$default", partition_id=PARTITION, + event_position=EVENT_POSITION, prefetch=5000) +with consumer: + start_time = time.time() + batch = consumer.receive(timeout=5) + while batch: + for event_data in batch: + last_offset = event_data.offset + last_sn = event_data.sequence_number + print("Received: {}, {}".format(last_offset, last_sn)) + print(event_data.body_as_str()) + total += 1 batch = consumer.receive(timeout=5) - while batch: - for event_data in batch: - last_offset = event_data.offset - last_sn = event_data.sequence_number - print("Received: {}, {}".format(last_offset, last_sn)) - print(event_data.body_as_str()) - total += 1 - batch = consumer.receive(timeout=5) - - end_time = time.time() - run_time = end_time - start_time - print("Received {} messages in {} seconds".format(total, run_time)) - -except KeyboardInterrupt: - pass + + end_time = time.time() + run_time = end_time - start_time + print("Received {} messages in {} 
seconds".format(total, run_time)) diff --git a/sdk/eventhub/azure-eventhubs/examples/recv_batch.py b/sdk/eventhub/azure-eventhubs/examples/recv_batch.py index 9b9edcd03a84..e3255ebe1c3f 100644 --- a/sdk/eventhub/azure-eventhubs/examples/recv_batch.py +++ b/sdk/eventhub/azure-eventhubs/examples/recv_batch.py @@ -11,41 +11,31 @@ """ import os -import logging - from azure.eventhub import EventHubClient, EventPosition, EventHubSharedKeyCredential -import examples -logger = examples.get_logger(logging.INFO) - -HOSTNAME = os.environ.get('EVENT_HUB_HOSTNAME') # .servicebus.windows.net -EVENT_HUB = os.environ.get('EVENT_HUB_NAME') - -USER = os.environ.get('EVENT_HUB_SAS_POLICY') -KEY = os.environ.get('EVENT_HUB_SAS_KEY') +HOSTNAME = os.environ['EVENT_HUB_HOSTNAME'] # .servicebus.windows.net +EVENT_HUB = os.environ['EVENT_HUB_NAME'] +USER = os.environ['EVENT_HUB_SAS_POLICY'] +KEY = os.environ['EVENT_HUB_SAS_KEY'] EVENT_POSITION = EventPosition("-1") PARTITION = "0" - total = 0 last_sn = -1 last_offset = "-1" client = EventHubClient(host=HOSTNAME, event_hub_path=EVENT_HUB, credential=EventHubSharedKeyCredential(USER, KEY), network_tracing=False) -try: - consumer = client.create_consumer(consumer_group="$default", partition_id=PARTITION, event_position=EVENT_POSITION, prefetch=100) - with consumer: - batched_events = consumer.receive(max_batch_size=10) - for event_data in batched_events: - last_offset = event_data.offset - last_sn = event_data.sequence_number - total += 1 - print("Partition {}, Received {}, sn={} offset={}".format( - PARTITION, - total, - last_sn, - last_offset)) - -except KeyboardInterrupt: - pass +consumer = client.create_consumer(consumer_group="$default", partition_id=PARTITION, + event_position=EVENT_POSITION, prefetch=100) +with consumer: + batched_events = consumer.receive(max_batch_size=10) + for event_data in batched_events: + last_offset = event_data.offset + last_sn = event_data.sequence_number + total += 1 + print("Partition {}, Received {}, sn={} 
offset={}".format( + PARTITION, + total, + last_sn, + last_offset)) diff --git a/sdk/eventhub/azure-eventhubs/examples/iterator_receiver.py b/sdk/eventhub/azure-eventhubs/examples/recv_iterator.py similarity index 51% rename from sdk/eventhub/azure-eventhubs/examples/iterator_receiver.py rename to sdk/eventhub/azure-eventhubs/examples/recv_iterator.py index 31f5b804cba3..45068ae2c1ef 100644 --- a/sdk/eventhub/azure-eventhubs/examples/iterator_receiver.py +++ b/sdk/eventhub/azure-eventhubs/examples/recv_iterator.py @@ -5,40 +5,22 @@ # Licensed under the MIT License. See License.txt in the project root for license information. # -------------------------------------------------------------------------------------------- -from threading import Thread import os -import time -import logging - from azure.eventhub import EventHubClient, EventPosition, EventHubSharedKeyCredential, EventData -import examples -logger = examples.get_logger(logging.INFO) - - -HOSTNAME = os.environ.get('EVENT_HUB_HOSTNAME') # .servicebus.windows.net -EVENT_HUB = os.environ.get('EVENT_HUB_NAME') - -USER = os.environ.get('EVENT_HUB_SAS_POLICY') -KEY = os.environ.get('EVENT_HUB_SAS_KEY') - +HOSTNAME = os.environ['EVENT_HUB_HOSTNAME'] # .servicebus.windows.net +EVENT_HUB = os.environ['EVENT_HUB_NAME'] +USER = os.environ['EVENT_HUB_SAS_POLICY'] +KEY = os.environ['EVENT_HUB_SAS_KEY'] EVENT_POSITION = EventPosition("-1") -class PartitionConsumerThread(Thread): - def __init__(self, consumer): - Thread.__init__(self) - self.consumer = consumer - - def run(self): - with consumer: - for item in self.consumer: - print(item) - - client = EventHubClient(host=HOSTNAME, event_hub_path=EVENT_HUB, credential=EventHubSharedKeyCredential(USER, KEY), - network_tracing=False) + network_tracing=False) consumer = client.create_consumer(consumer_group="$default", partition_id="0", event_position=EVENT_POSITION) - -thread = PartitionConsumerThread(consumer) -thread.start() +try: + with consumer: + for item in consumer: 
+ print(item) +except KeyboardInterrupt: + print("Iterator stopped") diff --git a/sdk/eventhub/azure-eventhubs/examples/send.py b/sdk/eventhub/azure-eventhubs/examples/send.py index a1a791a4d5af..d559989c69a8 100644 --- a/sdk/eventhub/azure-eventhubs/examples/send.py +++ b/sdk/eventhub/azure-eventhubs/examples/send.py @@ -13,44 +13,23 @@ # pylint: disable=C0111 -import logging import time import os - from azure.eventhub import EventHubClient, EventData, EventHubSharedKeyCredential -import examples -logger = examples.get_logger(logging.INFO) - - -HOSTNAME = os.environ.get('EVENT_HUB_HOSTNAME') # .servicebus.windows.net -EVENT_HUB = os.environ.get('EVENT_HUB_NAME') - -USER = os.environ.get('EVENT_HUB_SAS_POLICY') -KEY = os.environ.get('EVENT_HUB_SAS_KEY') - -try: - if not HOSTNAME: - raise ValueError("No EventHubs URL supplied.") - - client = EventHubClient(host=HOSTNAME, event_hub_path=EVENT_HUB, credential=EventHubSharedKeyCredential(USER, KEY), - network_tracing=False) - producer = client.create_producer(partition_id="0") - - try: - start_time = time.time() - with producer: - # not performance optimal, but works. Please do send events in batch to get much better performance. - for i in range(100): - ed = EventData("msg") - logger.info("Sending message: {}".format(i)) - producer.send(ed) - except: - raise - finally: - end_time = time.time() - run_time = end_time - start_time - logger.info("Runtime: {} seconds".format(run_time)) -except KeyboardInterrupt: - pass +HOSTNAME = os.environ['EVENT_HUB_HOSTNAME'] # .servicebus.windows.net +EVENT_HUB = os.environ['EVENT_HUB_NAME'] +USER = os.environ['EVENT_HUB_SAS_POLICY'] +KEY = os.environ['EVENT_HUB_SAS_KEY'] + +client = EventHubClient(host=HOSTNAME, event_hub_path=EVENT_HUB, credential=EventHubSharedKeyCredential(USER, KEY), + network_tracing=False) +producer = client.create_producer(partition_id="0") +start_time = time.time() +with producer: + # not performance optimal, but works. 
Please do send events in batch to get much better performance. + for i in range(100): + ed = EventData("msg") + print("Sending message: {}".format(i)) + producer.send(ed) # please use batch_send for better performance. Refer to event_data_batch.py diff --git a/sdk/eventhub/azure-eventhubs/examples/event_data_batch.py b/sdk/eventhub/azure-eventhubs/examples/send_event_data_batch.py similarity index 50% rename from sdk/eventhub/azure-eventhubs/examples/event_data_batch.py rename to sdk/eventhub/azure-eventhubs/examples/send_event_data_batch.py index 3cf6dc88f177..3ccaaf0a4fca 100644 --- a/sdk/eventhub/azure-eventhubs/examples/event_data_batch.py +++ b/sdk/eventhub/azure-eventhubs/examples/send_event_data_batch.py @@ -11,21 +11,16 @@ # pylint: disable=C0111 -import logging import time import os - from azure.eventhub import EventHubClient, EventData, EventHubSharedKeyCredential -import examples -logger = examples.get_logger(logging.INFO) - -HOSTNAME = os.environ.get('EVENT_HUB_HOSTNAME') # .servicebus.windows.net -EVENT_HUB = os.environ.get('EVENT_HUB_NAME') +HOSTNAME = os.environ['EVENT_HUB_HOSTNAME'] # .servicebus.windows.net +EVENT_HUB = os.environ['EVENT_HUB_NAME'] -USER = os.environ.get('EVENT_HUB_SAS_POLICY') -KEY = os.environ.get('EVENT_HUB_SAS_KEY') +USER = os.environ['EVENT_HUB_SAS_POLICY'] +KEY = os.environ['EVENT_HUB_SAS_KEY'] def create_batch_data(producer): @@ -40,25 +35,13 @@ def create_batch_data(producer): return event_data_batch -try: - if not HOSTNAME: - raise ValueError("No EventHubs URL supplied.") - - client = EventHubClient(host=HOSTNAME, event_hub_path=EVENT_HUB, credential=EventHubSharedKeyCredential(USER, KEY), - network_tracing=False) - producer = client.create_producer() - - try: - start_time = time.time() - with producer: - event_data_batch = create_batch_data(producer) - producer.send(event_data_batch) - except: - raise - finally: - end_time = time.time() - run_time = end_time - start_time - logger.info("Runtime: {} 
seconds".format(run_time)) - -except KeyboardInterrupt: - pass +client = EventHubClient(host=HOSTNAME, event_hub_path=EVENT_HUB, credential=EventHubSharedKeyCredential(USER, KEY), + network_tracing=False) +producer = client.create_producer() +start_time = time.time() +with producer: + event_data_batch = create_batch_data(producer) + producer.send(event_data_batch) +end_time = time.time() +run_time = end_time - start_time +print("Runtime: {} seconds".format(run_time)) diff --git a/sdk/eventhub/azure-eventhubs/examples/send_list_of_event_data.py b/sdk/eventhub/azure-eventhubs/examples/send_list_of_event_data.py new file mode 100644 index 000000000000..74441d0f5bc3 --- /dev/null +++ b/sdk/eventhub/azure-eventhubs/examples/send_list_of_event_data.py @@ -0,0 +1,33 @@ +#!/usr/bin/env python + +# -------------------------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# -------------------------------------------------------------------------------------------- + +""" +An example to show batch sending events to an Event Hub. 
+""" + +# pylint: disable=C0111 + +import time +import os +from azure.eventhub import EventData, EventHubClient, EventHubSharedKeyCredential + + +HOSTNAME = os.environ['EVENT_HUB_HOSTNAME'] # .servicebus.windows.net +EVENT_HUB = os.environ['EVENT_HUB_NAME'] +USER = os.environ['EVENT_HUB_SAS_POLICY'] +KEY = os.environ['EVENT_HUB_SAS_KEY'] + +client = EventHubClient(host=HOSTNAME, event_hub_path=EVENT_HUB, credential=EventHubSharedKeyCredential(USER, KEY), + network_tracing=False) +producer = client.create_producer(partition_id="1") + +event_list = [] +for i in range(1500): + event_list.append(EventData('Hello World')) +with producer: + start_time = time.time() + producer.send(event_list) diff --git a/sdk/eventhub/azure-eventhubs/examples/test_examples_eventhub.py b/sdk/eventhub/azure-eventhubs/examples/test_examples_eventhub.py index cf8943ef1e6a..52145f9222f0 100644 --- a/sdk/eventhub/azure-eventhubs/examples/test_examples_eventhub.py +++ b/sdk/eventhub/azure-eventhubs/examples/test_examples_eventhub.py @@ -4,14 +4,9 @@ # license information. 
#-------------------------------------------------------------------------- -import pytest -import datetime -import os import time import logging -from azure.eventhub import EventHubError - def create_eventhub_client(live_eventhub_config): # [START create_eventhub_client] From 998eeeda388c7f9c1e87420702963b9467220e05 Mon Sep 17 00:00:00 2001 From: "Adam Ling (MSFT)" <47871814+yunhaoling@users.noreply.github.com> Date: Wed, 4 Sep 2019 14:04:00 -0700 Subject: [PATCH 29/51] Update receive method (#7064) --- .../azure-eventhubs/azure/eventhub/aio/consumer_async.py | 5 ++--- sdk/eventhub/azure-eventhubs/azure/eventhub/common.py | 5 +++++ sdk/eventhub/azure-eventhubs/azure/eventhub/consumer.py | 7 +++---- 3 files changed, 10 insertions(+), 7 deletions(-) diff --git a/sdk/eventhub/azure-eventhubs/azure/eventhub/aio/consumer_async.py b/sdk/eventhub/azure-eventhubs/azure/eventhub/aio/consumer_async.py index f26853e32cac..147550c4d819 100644 --- a/sdk/eventhub/azure-eventhubs/azure/eventhub/aio/consumer_async.py +++ b/sdk/eventhub/azure-eventhubs/azure/eventhub/aio/consumer_async.py @@ -157,7 +157,7 @@ async def _open_with_retry(self): async def _receive(self, timeout_time=None, max_batch_size=None, **kwargs): last_exception = kwargs.get("last_exception") - data_batch = kwargs.get("data_batch") + data_batch = [] await self._open() remaining_time = timeout_time - time.time() @@ -225,9 +225,8 @@ async def receive(self, *, max_batch_size=None, timeout=None): timeout = timeout or self.client.config.receive_timeout max_batch_size = max_batch_size or min(self.client.config.max_batch_size, self.prefetch) - data_batch = [] # type: List[EventData] - return await self._receive_with_retry(timeout=timeout, max_batch_size=max_batch_size, data_batch=data_batch) + return await self._receive_with_retry(timeout=timeout, max_batch_size=max_batch_size) async def close(self, exception=None): # type: (Exception) -> None diff --git 
a/sdk/eventhub/azure-eventhubs/azure/eventhub/common.py b/sdk/eventhub/azure-eventhubs/azure/eventhub/common.py index 11668ba367f0..73fed892db11 100644 --- a/sdk/eventhub/azure-eventhubs/azure/eventhub/common.py +++ b/sdk/eventhub/azure-eventhubs/azure/eventhub/common.py @@ -202,6 +202,11 @@ def application_properties(self, value): @property def system_properties(self): + """ + Metadata set by the Event Hubs Service associated with the EventData + + :rtype: dict + """ return self._annotations @property diff --git a/sdk/eventhub/azure-eventhubs/azure/eventhub/consumer.py b/sdk/eventhub/azure-eventhubs/azure/eventhub/consumer.py index 82550bf3b9e5..e10f52e61b59 100644 --- a/sdk/eventhub/azure-eventhubs/azure/eventhub/consumer.py +++ b/sdk/eventhub/azure-eventhubs/azure/eventhub/consumer.py @@ -152,7 +152,7 @@ def _open_with_retry(self): def _receive(self, timeout_time=None, max_batch_size=None, **kwargs): last_exception = kwargs.get("last_exception") - data_batch = kwargs.get("data_batch") + data_batch = [] self._open() remaining_time = timeout_time - time.time() @@ -163,7 +163,7 @@ def _receive(self, timeout_time=None, max_batch_size=None, **kwargs): return data_batch remaining_time_ms = 1000 * remaining_time message_batch = self._handler.receive_message_batch( - max_batch_size=max_batch_size - (len(data_batch) if data_batch else 0), + max_batch_size=max_batch_size, timeout=remaining_time_ms) for message in message_batch: event_data = EventData._from_message(message) # pylint:disable=protected-access @@ -219,9 +219,8 @@ def receive(self, max_batch_size=None, timeout=None): timeout = timeout or self.client.config.receive_timeout max_batch_size = max_batch_size or min(self.client.config.max_batch_size, self.prefetch) - data_batch = [] # type: List[EventData] - return self._receive_with_retry(timeout=timeout, max_batch_size=max_batch_size, data_batch=data_batch) + return self._receive_with_retry(timeout=timeout, max_batch_size=max_batch_size) def close(self, 
exception=None): # type:(Exception) -> None From e13ddee7cce4822f210a1987e04187c49917df8a Mon Sep 17 00:00:00 2001 From: "Adam Ling (MSFT)" <47871814+yunhaoling@users.noreply.github.com> Date: Fri, 6 Sep 2019 12:15:23 -0700 Subject: [PATCH 30/51] Update accessibility of class (#7091) * Fix pylint * Update accessibility of of class * Small fix in livetest * Wait longer in iothub livetest * Small updates in livetest --- .../azure/eventhub/__init__.py | 2 +- .../eventhub/_consumer_producer_mixin.py | 57 ++++---- .../aio/_consumer_producer_mixin_async.py | 59 ++++----- .../azure/eventhub/aio/client_async.py | 44 +++---- .../azure/eventhub/aio/consumer_async.py | 124 +++++++++--------- .../azure/eventhub/aio/error_async.py | 5 +- .../azure/eventhub/aio/producer_async.py | 72 +++++----- .../azure-eventhubs/azure/eventhub/client.py | 44 +++---- .../azure/eventhub/client_abstract.py | 53 ++++---- .../azure/eventhub/consumer.py | 112 ++++++++-------- .../azure-eventhubs/azure/eventhub/error.py | 5 +- .../eventprocessor/event_processor.py | 4 +- .../azure/eventhub/producer.py | 66 +++++----- .../asynctests/test_iothub_receive_async.py | 6 +- .../test_longrunning_receive_async.py | 2 +- .../tests/test_iothub_receive.py | 6 +- .../tests/test_longrunning_receive.py | 6 +- 17 files changed, 332 insertions(+), 335 deletions(-) diff --git a/sdk/eventhub/azure-eventhubs/azure/eventhub/__init__.py b/sdk/eventhub/azure-eventhubs/azure/eventhub/__init__.py index 040d00c947d8..dfc198f71fa8 100644 --- a/sdk/eventhub/azure-eventhubs/azure/eventhub/__init__.py +++ b/sdk/eventhub/azure-eventhubs/azure/eventhub/__init__.py @@ -3,7 +3,7 @@ # Licensed under the MIT License. See License.txt in the project root for license information. 
# -------------------------------------------------------------------------------------------- -__version__ = "5.0.0b2" +__version__ = "5.0.0b3" from uamqp import constants # type: ignore from azure.eventhub.common import EventData, EventDataBatch, EventPosition from azure.eventhub.error import EventHubError, EventDataError, ConnectError, \ diff --git a/sdk/eventhub/azure-eventhubs/azure/eventhub/_consumer_producer_mixin.py b/sdk/eventhub/azure-eventhubs/azure/eventhub/_consumer_producer_mixin.py index 282a8c574088..ef32cb7a591c 100644 --- a/sdk/eventhub/azure-eventhubs/azure/eventhub/_consumer_producer_mixin.py +++ b/sdk/eventhub/azure-eventhubs/azure/eventhub/_consumer_producer_mixin.py @@ -15,9 +15,9 @@ class ConsumerProducerMixin(object): def __init__(self): - self.client = None + self._client = None self._handler = None - self.name = None + self._name = None def __enter__(self): return self @@ -26,15 +26,15 @@ def __exit__(self, exc_type, exc_val, exc_tb): self.close(exc_val) def _check_closed(self): - if self.error: - raise EventHubError("{} has been closed. Please create a new one to handle event data.".format(self.name)) + if self._error: + raise EventHubError("{} has been closed. 
Please create a new one to handle event data.".format(self._name)) def _create_handler(self): pass def _redirect(self, redirect): - self.redirected = redirect - self.running = False + self._redirected = redirect + self._running = False self._close_connection() def _open(self): @@ -45,36 +45,36 @@ def _open(self): """ # pylint: disable=protected-access - if not self.running: + if not self._running: if self._handler: self._handler.close() - if self.redirected: + if self._redirected: alt_creds = { - "username": self.client._auth_config.get("iot_username"), - "password": self.client._auth_config.get("iot_password")} + "username": self._client._auth_config.get("iot_username"), + "password": self._client._auth_config.get("iot_password")} else: alt_creds = {} self._create_handler() - self._handler.open(connection=self.client._conn_manager.get_connection( # pylint: disable=protected-access - self.client.address.hostname, - self.client.get_auth(**alt_creds) + self._handler.open(connection=self._client._conn_manager.get_connection( # pylint: disable=protected-access + self._client._address.hostname, + self._client._get_auth(**alt_creds) )) while not self._handler.client_ready(): time.sleep(0.05) self._max_message_size_on_link = self._handler.message_handler._link.peer_max_message_size \ or constants.MAX_MESSAGE_LENGTH_BYTES # pylint: disable=protected-access - self.running = True + self._running = True def _close_handler(self): self._handler.close() # close the link (sharing connection) or connection (not sharing) - self.running = False + self._running = False def _close_connection(self): self._close_handler() - self.client._conn_manager.reset_connection_if_broken() # pylint: disable=protected-access + self._client._conn_manager.reset_connection_if_broken() # pylint: disable=protected-access def _handle_exception(self, exception): - if not self.running and isinstance(exception, compat.TimeoutException): + if not self._running and isinstance(exception, 
compat.TimeoutException): exception = errors.AuthenticationException("Authorization timeout.") return _handle_exception(exception, self) @@ -89,19 +89,18 @@ def _do_retryable_operation(self, operation, timeout=None, **kwargs): last_exception = kwargs.pop('last_exception', None) operation_need_param = kwargs.pop('operation_need_param', True) - while retried_times <= self.client.config.max_retries: + while retried_times <= self._client._config.max_retries: # pylint: disable=protected-access try: if operation_need_param: return operation(timeout_time=timeout_time, last_exception=last_exception, **kwargs) - else: - return operation() + return operation() except Exception as exception: # pylint:disable=broad-except last_exception = self._handle_exception(exception) - self.client._try_delay(retried_times=retried_times, last_exception=last_exception, - timeout_time=timeout_time, entity_name=self.name) + self._client._try_delay(retried_times=retried_times, last_exception=last_exception, + timeout_time=timeout_time, entity_name=self._name) retried_times += 1 - log.info("%r has exhausted retry. Exception still occurs (%r)", self.name, last_exception) + log.info("%r has exhausted retry. Exception still occurs (%r)", self._name, last_exception) raise last_exception def close(self, exception=None): @@ -124,16 +123,16 @@ def close(self, exception=None): :caption: Close down the handler. 
""" - self.running = False - if self.error: # type: ignore + self._running = False + if self._error: # type: ignore return if isinstance(exception, errors.LinkRedirect): - self.redirected = exception + self._redirected = exception elif isinstance(exception, EventHubError): - self.error = exception + self._error = exception elif exception: - self.error = EventHubError(str(exception)) + self._error = EventHubError(str(exception)) else: - self.error = EventHubError("{} handler is closed.".format(self.name)) + self._error = EventHubError("{} handler is closed.".format(self._name)) if self._handler: self._handler.close() # this will close link if sharing connection. Otherwise close connection diff --git a/sdk/eventhub/azure-eventhubs/azure/eventhub/aio/_consumer_producer_mixin_async.py b/sdk/eventhub/azure-eventhubs/azure/eventhub/aio/_consumer_producer_mixin_async.py index 64873f843dc4..33c944d41be4 100644 --- a/sdk/eventhub/azure-eventhubs/azure/eventhub/aio/_consumer_producer_mixin_async.py +++ b/sdk/eventhub/azure-eventhubs/azure/eventhub/aio/_consumer_producer_mixin_async.py @@ -16,9 +16,9 @@ class ConsumerProducerMixin(object): def __init__(self): - self.client = None + self._client = None self._handler = None - self.name = None + self._name = None async def __aenter__(self): return self @@ -27,15 +27,15 @@ async def __aexit__(self, exc_type, exc_val, exc_tb): await self.close(exc_val) def _check_closed(self): - if self.error: - raise EventHubError("{} has been closed. Please create a new one to handle event data.".format(self.name)) + if self._error: + raise EventHubError("{} has been closed. 
Please create a new one to handle event data.".format(self._name)) def _create_handler(self): pass async def _redirect(self, redirect): - self.redirected = redirect - self.running = False + self._redirected = redirect + self._running = False await self._close_connection() async def _open(self): @@ -46,36 +46,36 @@ async def _open(self): """ # pylint: disable=protected-access - if not self.running: + if not self._running: if self._handler: await self._handler.close_async() - if self.redirected: + if self._redirected: alt_creds = { - "username": self.client._auth_config.get("iot_username"), - "password": self.client._auth_config.get("iot_password")} + "username": self._client._auth_config.get("iot_username"), + "password": self._client._auth_config.get("iot_password")} else: alt_creds = {} self._create_handler() - await self._handler.open_async(connection=await self.client._conn_manager.get_connection( - self.client.address.hostname, - self.client.get_auth(**alt_creds) + await self._handler.open_async(connection=await self._client._conn_manager.get_connection( + self._client._address.hostname, + self._client._get_auth(**alt_creds) )) while not await self._handler.client_ready_async(): await asyncio.sleep(0.05) self._max_message_size_on_link = self._handler.message_handler._link.peer_max_message_size \ or constants.MAX_MESSAGE_LENGTH_BYTES # pylint: disable=protected-access - self.running = True + self._running = True async def _close_handler(self): await self._handler.close_async() # close the link (sharing connection) or connection (not sharing) - self.running = False + self._running = False async def _close_connection(self): await self._close_handler() - await self.client._conn_manager.reset_connection_if_broken() # pylint:disable=protected-access + await self._client._conn_manager.reset_connection_if_broken() # pylint:disable=protected-access async def _handle_exception(self, exception): - if not self.running and isinstance(exception, compat.TimeoutException): + 
if not self._running and isinstance(exception, compat.TimeoutException): exception = errors.AuthenticationException("Authorization timeout.") return await _handle_exception(exception, self) @@ -90,19 +90,18 @@ async def _do_retryable_operation(self, operation, timeout=None, **kwargs): last_exception = kwargs.pop('last_exception', None) operation_need_param = kwargs.pop('operation_need_param', True) - while retried_times <= self.client.config.max_retries: + while retried_times <= self._client._config.max_retries: try: if operation_need_param: return await operation(timeout_time=timeout_time, last_exception=last_exception, **kwargs) - else: - return await operation() + return await operation() except Exception as exception: # pylint:disable=broad-except last_exception = await self._handle_exception(exception) - await self.client._try_delay(retried_times=retried_times, last_exception=last_exception, - timeout_time=timeout_time, entity_name=self.name) + await self._client._try_delay(retried_times=retried_times, last_exception=last_exception, + timeout_time=timeout_time, entity_name=self._name) retried_times += 1 - log.info("%r has exhausted retry. Exception still occurs (%r)", self.name, last_exception) + log.info("%r has exhausted retry. Exception still occurs (%r)", self._name, last_exception) raise last_exception async def close(self, exception=None): @@ -125,18 +124,18 @@ async def close(self, exception=None): :caption: Close down the handler. 
""" - self.running = False - if self.error: #type: ignore + self._running = False + if self._error: #type: ignore return if isinstance(exception, errors.LinkRedirect): - self.redirected = exception + self._redirected = exception elif isinstance(exception, EventHubError): - self.error = exception + self._error = exception elif isinstance(exception, (errors.LinkDetach, errors.ConnectionClose)): - self.error = ConnectError(str(exception), exception) + self._error = ConnectError(str(exception), exception) elif exception: - self.error = EventHubError(str(exception)) + self._error = EventHubError(str(exception)) else: - self.error = EventHubError("This receive handler is now closed.") + self._error = EventHubError("This receive handler is now closed.") if self._handler: await self._handler.close_async() diff --git a/sdk/eventhub/azure-eventhubs/azure/eventhub/aio/client_async.py b/sdk/eventhub/azure-eventhubs/azure/eventhub/aio/client_async.py index 23aefaf2aa3e..67f6ab52dd30 100644 --- a/sdk/eventhub/azure-eventhubs/azure/eventhub/aio/client_async.py +++ b/sdk/eventhub/azure-eventhubs/azure/eventhub/aio/client_async.py @@ -68,36 +68,36 @@ def _create_auth(self, username=None, password=None): :param password: The shared access key. 
:type password: str """ - http_proxy = self.config.http_proxy - transport_type = self.config.transport_type - auth_timeout = self.config.auth_timeout + http_proxy = self._config.http_proxy + transport_type = self._config.transport_type + auth_timeout = self._config.auth_timeout - if isinstance(self.credential, EventHubSharedKeyCredential): # pylint:disable=no-else-return + if isinstance(self._credential, EventHubSharedKeyCredential): # pylint:disable=no-else-return username = username or self._auth_config['username'] password = password or self._auth_config['password'] if "@sas.root" in username: return authentication.SASLPlain( - self.host, username, password, http_proxy=http_proxy, transport_type=transport_type) + self._host, username, password, http_proxy=http_proxy, transport_type=transport_type) return authentication.SASTokenAsync.from_shared_access_key( - self.auth_uri, username, password, timeout=auth_timeout, http_proxy=http_proxy, + self._auth_uri, username, password, timeout=auth_timeout, http_proxy=http_proxy, transport_type=transport_type) - elif isinstance(self.credential, EventHubSASTokenCredential): - token = self.credential.get_sas_token() + elif isinstance(self._credential, EventHubSASTokenCredential): + token = self._credential.get_sas_token() try: expiry = int(parse_sas_token(token)['se']) except (KeyError, TypeError, IndexError): raise ValueError("Supplied SAS token has no valid expiry value.") return authentication.SASTokenAsync( - self.auth_uri, self.auth_uri, token, + self._auth_uri, self._auth_uri, token, expires_at=expiry, timeout=auth_timeout, http_proxy=http_proxy, transport_type=transport_type) else: - get_jwt_token = functools.partial(self.credential.get_token, 'https://eventhubs.azure.net//.default') - return authentication.JWTTokenAsync(self.auth_uri, self.auth_uri, + get_jwt_token = functools.partial(self._credential.get_token, 'https://eventhubs.azure.net//.default') + return authentication.JWTTokenAsync(self._auth_uri, 
self._auth_uri, get_jwt_token, http_proxy=http_proxy, transport_type=transport_type) @@ -105,9 +105,9 @@ async def _close_connection(self): await self._conn_manager.reset_connection_if_broken() async def _try_delay(self, retried_times, last_exception, timeout_time=None, entity_name=None): - entity_name = entity_name or self.container_id - backoff = self.config.backoff_factor * 2 ** retried_times - if backoff <= self.config.backoff_max and ( + entity_name = entity_name or self._container_id + backoff = self._config.backoff_factor * 2 ** retried_times + if backoff <= self._config.backoff_max and ( timeout_time is None or time.time() + backoff <= timeout_time): # pylint:disable=no-else-return await asyncio.sleep(backoff) log.info("%r has an exception (%r). Retrying...", format(entity_name), last_exception) @@ -123,11 +123,11 @@ async def _management_request(self, mgmt_msg, op_type): } retried_times = 0 - while retried_times <= self.config.max_retries: + while retried_times <= self._config.max_retries: mgmt_auth = self._create_auth(**alt_creds) - mgmt_client = AMQPClientAsync(self.mgmt_target, auth=mgmt_auth, debug=self.config.network_tracing) + mgmt_client = AMQPClientAsync(self._mgmt_target, auth=mgmt_auth, debug=self._config.network_tracing) try: - conn = await self._conn_manager.get_connection(self.host, mgmt_auth) + conn = await self._conn_manager.get_connection(self._host, mgmt_auth) await mgmt_client.open_async(connection=conn) response = await mgmt_client.mgmt_request_async( mgmt_msg, @@ -265,12 +265,12 @@ def create_consumer( """ owner_level = kwargs.get("owner_level") operation = kwargs.get("operation") - prefetch = kwargs.get("prefetch") or self.config.prefetch + prefetch = kwargs.get("prefetch") or self._config.prefetch loop = kwargs.get("loop") - path = self.address.path + operation if operation else self.address.path + path = self._address.path + operation if operation else self._address.path source_url = 
"amqps://{}{}/ConsumerGroups/{}/Partitions/{}".format( - self.address.hostname, path, consumer_group, partition_id) + self._address.hostname, path, consumer_group, partition_id) handler = EventHubConsumer( self, source_url, event_position=event_position, owner_level=owner_level, prefetch=prefetch, loop=loop) @@ -309,10 +309,10 @@ def create_producer( """ - target = "amqps://{}{}".format(self.address.hostname, self.address.path) + target = "amqps://{}{}".format(self._address.hostname, self._address.path) if operation: target = target + operation - send_timeout = self.config.send_timeout if send_timeout is None else send_timeout + send_timeout = self._config.send_timeout if send_timeout is None else send_timeout handler = EventHubProducer( self, target, partition=partition_id, send_timeout=send_timeout, loop=loop) diff --git a/sdk/eventhub/azure-eventhubs/azure/eventhub/aio/consumer_async.py b/sdk/eventhub/azure-eventhubs/azure/eventhub/aio/consumer_async.py index 147550c4d819..d3651a1c9d8f 100644 --- a/sdk/eventhub/azure-eventhubs/azure/eventhub/aio/consumer_async.py +++ b/sdk/eventhub/azure-eventhubs/azure/eventhub/aio/consumer_async.py @@ -32,9 +32,9 @@ class EventHubConsumer(ConsumerProducerMixin): # pylint:disable=too-many-instan sometimes referred to as "Non-Epoch Consumers." 
""" - timeout = 0 - _epoch = b'com.microsoft:epoch' - _timeout = b'com.microsoft:timeout' + _timeout = 0 + _epoch_symbol = b'com.microsoft:epoch' + _timeout_symbol = b'com.microsoft:timeout' def __init__( # pylint: disable=super-init-not-called self, client, source, **kwargs): @@ -64,28 +64,28 @@ def __init__( # pylint: disable=super-init-not-called loop = kwargs.get("loop", None) super(EventHubConsumer, self).__init__() - self.loop = loop or asyncio.get_event_loop() - self.running = False - self.client = client - self.source = source - self.offset = event_position - self.messages_iter = None - self.prefetch = prefetch - self.owner_level = owner_level - self.keep_alive = keep_alive - self.auto_reconnect = auto_reconnect - self.retry_policy = errors.ErrorPolicy(max_retries=self.client.config.max_retries, on_error=_error_handler) - self.reconnect_backoff = 1 - self.redirected = None - self.error = None + self._loop = loop or asyncio.get_event_loop() + self._running = False + self._client = client + self._source = source + self._offset = event_position + self._messages_iter = None + self._prefetch = prefetch + self._owner_level = owner_level + self._keep_alive = keep_alive + self._auto_reconnect = auto_reconnect + self._retry_policy = errors.ErrorPolicy(max_retries=self._client._config.max_retries, on_error=_error_handler) # pylint:disable=protected-access + self._reconnect_backoff = 1 + self._redirected = None + self._error = None self._link_properties = {} - partition = self.source.split('/')[-1] - self.partition = partition - self.name = "EHReceiver-{}-partition{}".format(uuid.uuid4(), partition) + partition = self._source.split('/')[-1] + self._partition = partition + self._name = "EHReceiver-{}-partition{}".format(uuid.uuid4(), partition) if owner_level: - self._link_properties[types.AMQPSymbol(self._epoch)] = types.AMQPLong(int(owner_level)) - link_property_timeout_ms = (self.client.config.receive_timeout or self.timeout) * 1000 - 
self._link_properties[types.AMQPSymbol(self._timeout)] = types.AMQPLong(int(link_property_timeout_ms)) + self._link_properties[types.AMQPSymbol(self._epoch_symbol)] = types.AMQPLong(int(owner_level)) + link_property_timeout_ms = (self._client._config.receive_timeout or self._timeout) * 1000 # pylint:disable=protected-access + self._link_properties[types.AMQPSymbol(self._timeout_symbol)] = types.AMQPLong(int(link_property_timeout_ms)) self._handler = None def __aiter__(self): @@ -93,48 +93,48 @@ def __aiter__(self): async def __anext__(self): retried_times = 0 - while retried_times < self.client.config.max_retries: + while retried_times < self._client._config.max_retries: # pylint:disable=protected-access try: await self._open() - if not self.messages_iter: - self.messages_iter = self._handler.receive_messages_iter_async() - message = await self.messages_iter.__anext__() + if not self._messages_iter: + self._messages_iter = self._handler.receive_messages_iter_async() + message = await self._messages_iter.__anext__() event_data = EventData._from_message(message) # pylint:disable=protected-access - self.offset = EventPosition(event_data.offset, inclusive=False) + self._offset = EventPosition(event_data.offset, inclusive=False) retried_times = 0 return event_data except Exception as exception: # pylint:disable=broad-except last_exception = await self._handle_exception(exception) - await self.client._try_delay(retried_times=retried_times, last_exception=last_exception, - entity_name=self.name) + await self._client._try_delay(retried_times=retried_times, last_exception=last_exception, # pylint:disable=protected-access + entity_name=self._name) retried_times += 1 def _create_handler(self): alt_creds = { - "username": self.client._auth_config.get("iot_username") if self.redirected else None, # pylint:disable=protected-access - "password": self.client._auth_config.get("iot_password") if self.redirected else None # pylint:disable=protected-access + "username": 
self._client._auth_config.get("iot_username") if self._redirected else None, # pylint:disable=protected-access + "password": self._client._auth_config.get("iot_password") if self._redirected else None # pylint:disable=protected-access } - source = Source(self.source) - if self.offset is not None: - source.set_filter(self.offset._selector()) # pylint:disable=protected-access + source = Source(self._source) + if self._offset is not None: + source.set_filter(self._offset._selector()) # pylint:disable=protected-access self._handler = ReceiveClientAsync( source, - auth=self.client.get_auth(**alt_creds), - debug=self.client.config.network_tracing, - prefetch=self.prefetch, + auth=self._client._get_auth(**alt_creds), # pylint:disable=protected-access + debug=self._client._config.network_tracing, # pylint:disable=protected-access + prefetch=self._prefetch, link_properties=self._link_properties, - timeout=self.timeout, - error_policy=self.retry_policy, - keep_alive_interval=self.keep_alive, - client_name=self.name, - properties=self.client._create_properties( # pylint:disable=protected-access - self.client.config.user_agent), - loop=self.loop) - self.messages_iter = None + timeout=self._timeout, + error_policy=self._retry_policy, + keep_alive_interval=self._keep_alive, + client_name=self._name, + properties=self._client._create_properties( # pylint:disable=protected-access + self._client._config.user_agent), # pylint:disable=protected-access + loop=self._loop) + self._messages_iter = None async def _redirect(self, redirect): - self.messages_iter = None + self._messages_iter = None await super(EventHubConsumer, self)._redirect(redirect) async def _open(self): @@ -145,11 +145,11 @@ async def _open(self): """ # pylint: disable=protected-access - self.redirected = self.redirected or self.client._iothub_redirect_info + self._redirected = self._redirected or self._client._iothub_redirect_info - if not self.running and self.redirected: - 
self.client._process_redirect_uri(self.redirected) - self.source = self.redirected.address + if not self._running and self._redirected: + self._client._process_redirect_uri(self._redirected) + self._source = self._redirected.address await super(EventHubConsumer, self)._open() async def _open_with_retry(self): @@ -163,7 +163,7 @@ async def _receive(self, timeout_time=None, max_batch_size=None, **kwargs): remaining_time = timeout_time - time.time() if remaining_time <= 0.0: if last_exception: - log.info("%r receive operation timed out. (%r)", self.name, last_exception) + log.info("%r receive operation timed out. (%r)", self._name, last_exception) raise last_exception return data_batch @@ -173,7 +173,7 @@ async def _receive(self, timeout_time=None, max_batch_size=None, **kwargs): timeout=remaining_time_ms) for message in message_batch: event_data = EventData._from_message(message) # pylint:disable=protected-access - self.offset = EventPosition(event_data.offset) + self._offset = EventPosition(event_data.offset) data_batch.append(event_data) return data_batch @@ -223,8 +223,8 @@ async def receive(self, *, max_batch_size=None, timeout=None): """ self._check_closed() - timeout = timeout or self.client.config.receive_timeout - max_batch_size = max_batch_size or min(self.client.config.max_batch_size, self.prefetch) + timeout = timeout or self._client._config.receive_timeout # pylint:disable=protected-access + max_batch_size = max_batch_size or min(self._client._config.max_batch_size, self._prefetch) # pylint:disable=protected-access return await self._receive_with_retry(timeout=timeout, max_batch_size=max_batch_size) @@ -248,18 +248,18 @@ async def close(self, exception=None): :caption: Close down the handler. 
""" - self.running = False - if self.error: + self._running = False + if self._error: return if isinstance(exception, errors.LinkRedirect): - self.redirected = exception + self._redirected = exception elif isinstance(exception, EventHubError): - self.error = exception + self._error = exception elif isinstance(exception, (errors.LinkDetach, errors.ConnectionClose)): - self.error = ConnectError(str(exception), exception) + self._error = ConnectError(str(exception), exception) elif exception: - self.error = EventHubError(str(exception)) + self._error = EventHubError(str(exception)) else: - self.error = EventHubError("This receive handler is now closed.") + self._error = EventHubError("This receive handler is now closed.") if self._handler: await self._handler.close_async() diff --git a/sdk/eventhub/azure-eventhubs/azure/eventhub/aio/error_async.py b/sdk/eventhub/azure-eventhubs/azure/eventhub/aio/error_async.py index 58ecee91ad1b..ae1cd8084f3d 100644 --- a/sdk/eventhub/azure-eventhubs/azure/eventhub/aio/error_async.py +++ b/sdk/eventhub/azure-eventhubs/azure/eventhub/aio/error_async.py @@ -3,7 +3,6 @@ # Licensed under the MIT License. See License.txt in the project root for license information. 
# -------------------------------------------------------------------------------------------- import asyncio -import time import logging from uamqp import errors, compat # type: ignore @@ -40,9 +39,9 @@ async def _handle_exception(exception, closable): # pylint:disable=too-many-bra if isinstance(exception, asyncio.CancelledError): raise exception try: - name = closable.name + name = closable._name # pylint: disable=protected-access except AttributeError: - name = closable.container_id + name = closable._container_id # pylint: disable=protected-access if isinstance(exception, KeyboardInterrupt): # pylint:disable=no-else-raise log.info("%r stops due to keyboard interrupt", name) await closable.close() diff --git a/sdk/eventhub/azure-eventhubs/azure/eventhub/aio/producer_async.py b/sdk/eventhub/azure-eventhubs/azure/eventhub/aio/producer_async.py index f9fb32420e81..999bdc09c787 100644 --- a/sdk/eventhub/azure-eventhubs/azure/eventhub/aio/producer_async.py +++ b/sdk/eventhub/azure-eventhubs/azure/eventhub/aio/producer_async.py @@ -27,7 +27,7 @@ class EventHubProducer(ConsumerProducerMixin): # pylint: disable=too-many-insta to a partition. 
""" - _timeout = b'com.microsoft:timeout' + _timeout_symbol = b'com.microsoft:timeout' def __init__( # pylint: disable=super-init-not-called self, client, target, **kwargs): @@ -60,42 +60,42 @@ def __init__( # pylint: disable=super-init-not-called loop = kwargs.get("loop", None) super(EventHubProducer, self).__init__() - self.loop = loop or asyncio.get_event_loop() + self._loop = loop or asyncio.get_event_loop() self._max_message_size_on_link = None - self.running = False - self.client = client - self.target = target - self.partition = partition - self.keep_alive = keep_alive - self.auto_reconnect = auto_reconnect - self.timeout = send_timeout - self.retry_policy = errors.ErrorPolicy(max_retries=self.client.config.max_retries, on_error=_error_handler) - self.reconnect_backoff = 1 - self.name = "EHProducer-{}".format(uuid.uuid4()) - self.unsent_events = None - self.redirected = None - self.error = None + self._running = False + self._client = client + self._target = target + self._partition = partition + self._keep_alive = keep_alive + self._auto_reconnect = auto_reconnect + self._timeout = send_timeout + self._retry_policy = errors.ErrorPolicy(max_retries=self._client._config.max_retries, on_error=_error_handler) # pylint:disable=protected-access + self._reconnect_backoff = 1 + self._name = "EHProducer-{}".format(uuid.uuid4()) + self._unsent_events = None + self._redirected = None + self._error = None if partition: - self.target += "/Partitions/" + partition - self.name += "-partition{}".format(partition) + self._target += "/Partitions/" + partition + self._name += "-partition{}".format(partition) self._handler = None self._outcome = None self._condition = None - self._link_properties = {types.AMQPSymbol(self._timeout): types.AMQPLong(int(self.timeout * 1000))} + self._link_properties = {types.AMQPSymbol(self._timeout_symbol): types.AMQPLong(int(self._timeout * 1000))} def _create_handler(self): self._handler = SendClientAsync( - self.target, - 
auth=self.client.get_auth(), - debug=self.client.config.network_tracing, - msg_timeout=self.timeout, - error_policy=self.retry_policy, - keep_alive_interval=self.keep_alive, - client_name=self.name, + self._target, + auth=self._client._get_auth(), # pylint:disable=protected-access + debug=self._client._config.network_tracing, # pylint:disable=protected-access + msg_timeout=self._timeout, + error_policy=self._retry_policy, + keep_alive_interval=self._keep_alive, + client_name=self._name, link_properties=self._link_properties, - properties=self.client._create_properties( # pylint: disable=protected-access - self.client.config.user_agent), - loop=self.loop) + properties=self._client._create_properties( # pylint: disable=protected-access + self._client._config.user_agent), # pylint:disable=protected-access + loop=self._loop) async def _open(self): """ @@ -104,16 +104,16 @@ async def _open(self): context will be used to create a new handler before opening it. """ - if not self.running and self.redirected: - self.client._process_redirect_uri(self.redirected) # pylint: disable=protected-access - self.target = self.redirected.address + if not self._running and self._redirected: + self._client._process_redirect_uri(self._redirected) # pylint: disable=protected-access + self._target = self._redirected.address await super(EventHubProducer, self)._open() async def _open_with_retry(self): return await self._do_retryable_operation(self._open, operation_need_param=False) async def _send_event_data(self, timeout_time=None, last_exception=None): - if self.unsent_events: + if self._unsent_events: await self._open() remaining_time = timeout_time - time.time() if remaining_time <= 0.0: @@ -121,12 +121,12 @@ async def _send_event_data(self, timeout_time=None, last_exception=None): error = last_exception else: error = OperationTimeoutError("send operation timed out") - log.info("%r send operation timed out. (%r)", self.name, error) + log.info("%r send operation timed out. 
(%r)", self._name, error) raise error self._handler._msg_timeout = remaining_time # pylint: disable=protected-access - self._handler.queue_message(*self.unsent_events) + self._handler.queue_message(*self._unsent_events) await self._handler.wait_async() - self.unsent_events = self._handler.pending_messages + self._unsent_events = self._handler.pending_messages if self._outcome != constants.MessageSendResult.Ok: if self._outcome == constants.MessageSendResult.Timeout: self._condition = OperationTimeoutError("send operation timed out") @@ -228,7 +228,7 @@ async def send( event_data = _set_partition_key(event_data, partition_key) wrapper_event_data = EventDataBatch._from_batch(event_data, partition_key) # pylint: disable=protected-access wrapper_event_data.message.on_send_complete = self._on_outcome - self.unsent_events = [wrapper_event_data.message] + self._unsent_events = [wrapper_event_data.message] await self._send_event_data_with_retry(timeout=timeout) # pylint:disable=unexpected-keyword-arg # TODO: to refactor async def close(self, exception=None): diff --git a/sdk/eventhub/azure-eventhubs/azure/eventhub/client.py b/sdk/eventhub/azure-eventhubs/azure/eventhub/client.py index 347b1263be2c..90a1ac86742f 100644 --- a/sdk/eventhub/azure-eventhubs/azure/eventhub/client.py +++ b/sdk/eventhub/azure-eventhubs/azure/eventhub/client.py @@ -68,38 +68,38 @@ def _create_auth(self, username=None, password=None): :param password: The shared access key. 
:type password: str """ - http_proxy = self.config.http_proxy - transport_type = self.config.transport_type - auth_timeout = self.config.auth_timeout + http_proxy = self._config.http_proxy + transport_type = self._config.transport_type + auth_timeout = self._config.auth_timeout # TODO: the following code can be refactored to create auth from classes directly instead of using if-else - if isinstance(self.credential, EventHubSharedKeyCredential): # pylint:disable=no-else-return + if isinstance(self._credential, EventHubSharedKeyCredential): # pylint:disable=no-else-return username = username or self._auth_config['username'] password = password or self._auth_config['password'] if "@sas.root" in username: return authentication.SASLPlain( - self.host, username, password, http_proxy=http_proxy, transport_type=transport_type) + self._host, username, password, http_proxy=http_proxy, transport_type=transport_type) return authentication.SASTokenAuth.from_shared_access_key( - self.auth_uri, username, password, timeout=auth_timeout, http_proxy=http_proxy, + self._auth_uri, username, password, timeout=auth_timeout, http_proxy=http_proxy, transport_type=transport_type) - elif isinstance(self.credential, EventHubSASTokenCredential): - token = self.credential.get_sas_token() + elif isinstance(self._credential, EventHubSASTokenCredential): + token = self._credential.get_sas_token() try: expiry = int(parse_sas_token(token)['se']) except (KeyError, TypeError, IndexError): raise ValueError("Supplied SAS token has no valid expiry value.") return authentication.SASTokenAuth( - self.auth_uri, self.auth_uri, token, + self._auth_uri, self._auth_uri, token, expires_at=expiry, timeout=auth_timeout, http_proxy=http_proxy, transport_type=transport_type) else: # Azure credential - get_jwt_token = functools.partial(self.credential.get_token, + get_jwt_token = functools.partial(self._credential.get_token, 'https://eventhubs.azure.net//.default') - return authentication.JWTTokenAuth(self.auth_uri, 
self.auth_uri, + return authentication.JWTTokenAuth(self._auth_uri, self._auth_uri, get_jwt_token, http_proxy=http_proxy, transport_type=transport_type) @@ -107,9 +107,9 @@ def _close_connection(self): self._conn_manager.reset_connection_if_broken() def _try_delay(self, retried_times, last_exception, timeout_time=None, entity_name=None): - entity_name = entity_name or self.container_id - backoff = self.config.backoff_factor * 2 ** retried_times - if backoff <= self.config.backoff_max and ( + entity_name = entity_name or self._container_id + backoff = self._config.backoff_factor * 2 ** retried_times + if backoff <= self._config.backoff_max and ( timeout_time is None or time.time() + backoff <= timeout_time): # pylint:disable=no-else-return time.sleep(backoff) log.info("%r has an exception (%r). Retrying...", format(entity_name), last_exception) @@ -125,11 +125,11 @@ def _management_request(self, mgmt_msg, op_type): } retried_times = 0 - while retried_times <= self.config.max_retries: + while retried_times <= self._config.max_retries: mgmt_auth = self._create_auth(**alt_creds) - mgmt_client = uamqp.AMQPClient(self.mgmt_target) + mgmt_client = uamqp.AMQPClient(self._mgmt_target) try: - conn = self._conn_manager.get_connection(self.host, mgmt_auth) #pylint:disable=assignment-from-none + conn = self._conn_manager.get_connection(self._host, mgmt_auth) #pylint:disable=assignment-from-none mgmt_client.open(connection=conn) response = mgmt_client.mgmt_request( mgmt_msg, @@ -262,11 +262,11 @@ def create_consumer(self, consumer_group, partition_id, event_position, **kwargs """ owner_level = kwargs.get("owner_level") operation = kwargs.get("operation") - prefetch = kwargs.get("prefetch") or self.config.prefetch + prefetch = kwargs.get("prefetch") or self._config.prefetch - path = self.address.path + operation if operation else self.address.path + path = self._address.path + operation if operation else self._address.path source_url = 
"amqps://{}{}/ConsumerGroups/{}/Partitions/{}".format( - self.address.hostname, path, consumer_group, partition_id) + self._address.hostname, path, consumer_group, partition_id) handler = EventHubConsumer( self, source_url, event_position=event_position, owner_level=owner_level, prefetch=prefetch) @@ -299,10 +299,10 @@ def create_producer(self, partition_id=None, operation=None, send_timeout=None): """ - target = "amqps://{}{}".format(self.address.hostname, self.address.path) + target = "amqps://{}{}".format(self._address.hostname, self._address.path) if operation: target = target + operation - send_timeout = self.config.send_timeout if send_timeout is None else send_timeout + send_timeout = self._config.send_timeout if send_timeout is None else send_timeout handler = EventHubProducer( self, target, partition=partition_id, send_timeout=send_timeout) diff --git a/sdk/eventhub/azure-eventhubs/azure/eventhub/client_abstract.py b/sdk/eventhub/azure-eventhubs/azure/eventhub/client_abstract.py index 8b6712ce207e..7d4c8cd2712e 100644 --- a/sdk/eventhub/azure-eventhubs/azure/eventhub/client_abstract.py +++ b/sdk/eventhub/azure-eventhubs/azure/eventhub/client_abstract.py @@ -133,31 +133,32 @@ def __init__(self, host, event_hub_path, credential, **kwargs): queued. Default value is 60 seconds. If set to 0, there will be no timeout. 
:type send_timeout: float """ - self.container_id = "eventhub.pysdk-" + str(uuid.uuid4())[:8] - self.address = _Address() - self.address.hostname = host - self.address.path = "/" + event_hub_path if event_hub_path else "" + self.eh_name = event_hub_path + self._host = host + self._container_id = "eventhub.pysdk-" + str(uuid.uuid4())[:8] + self._address = _Address() + self._address.hostname = host + self._address.path = "/" + event_hub_path if event_hub_path else "" self._auth_config = {} # type:Dict[str,str] - self.credential = credential + self._credential = credential if isinstance(credential, EventHubSharedKeyCredential): - self.username = credential.policy - self.password = credential.key - self._auth_config['username'] = self.username - self._auth_config['password'] = self.password + self._username = credential.policy + self._password = credential.key + self._auth_config['username'] = self._username + self._auth_config['password'] = self._password - self.host = host - self.eh_name = event_hub_path - self.keep_alive = kwargs.get("keep_alive", 30) - self.auto_reconnect = kwargs.get("auto_reconnect", True) - self.mgmt_target = "amqps://{}/{}".format(self.host, self.eh_name) - self.auth_uri = "sb://{}{}".format(self.address.hostname, self.address.path) - self.get_auth = functools.partial(self._create_auth) - self.config = _Configuration(**kwargs) - self.debug = self.config.network_tracing + self._keep_alive = kwargs.get("keep_alive", 30) + self._auto_reconnect = kwargs.get("auto_reconnect", True) + self._mgmt_target = "amqps://{}/{}".format(self._host, self.eh_name) + self._auth_uri = "sb://{}{}".format(self._address.hostname, self._address.path) + self._get_auth = functools.partial(self._create_auth) + self._config = _Configuration(**kwargs) + self._debug = self._config.network_tracing self._is_iothub = False self._iothub_redirect_info = None + self._redirect_consumer = None - log.info("%r: Created the Event Hub client", self.container_id) + log.info("%r: Created 
the Event Hub client", self._container_id) @classmethod def _from_iothub_connection_string(cls, conn_str, **kwargs): @@ -176,8 +177,8 @@ def _from_iothub_connection_string(cls, conn_str, **kwargs): 'iot_password': key, 'username': username, 'password': password} - client._is_iothub = True - client._redirect_consumer = client.create_consumer(consumer_group='$default', + client._is_iothub = True # pylint: disable=protected-access + client._redirect_consumer = client.create_consumer(consumer_group='$default', # pylint: disable=protected-access, no-member partition_id='0', event_position=EventPosition('-1'), operation='/messages/events') @@ -216,11 +217,11 @@ def _create_properties(self, user_agent=None): # pylint: disable=no-self-use def _process_redirect_uri(self, redirect): redirect_uri = redirect.address.decode('utf-8') auth_uri, _, _ = redirect_uri.partition("/ConsumerGroups") - self.address = urlparse(auth_uri) - self.host = self.address.hostname - self.auth_uri = "sb://{}{}".format(self.address.hostname, self.address.path) - self.eh_name = self.address.path.lstrip('/') - self.mgmt_target = redirect_uri + self._address = urlparse(auth_uri) + self._host = self._address.hostname + self.eh_name = self._address.path.lstrip('/') + self._auth_uri = "sb://{}{}".format(self._address.hostname, self._address.path) + self._mgmt_target = redirect_uri if self._is_iothub: self._iothub_redirect_info = redirect diff --git a/sdk/eventhub/azure-eventhubs/azure/eventhub/consumer.py b/sdk/eventhub/azure-eventhubs/azure/eventhub/consumer.py index e10f52e61b59..0e89ba9bc55f 100644 --- a/sdk/eventhub/azure-eventhubs/azure/eventhub/consumer.py +++ b/sdk/eventhub/azure-eventhubs/azure/eventhub/consumer.py @@ -34,9 +34,9 @@ class EventHubConsumer(ConsumerProducerMixin): # pylint:disable=too-many-instan sometimes referred to as "Non-Epoch Consumers." 
""" - timeout = 0 - _epoch = b'com.microsoft:epoch' - _timeout = b'com.microsoft:timeout' + _timeout = 0 + _epoch_symbol = b'com.microsoft:epoch' + _timeout_symbol = b'com.microsoft:timeout' def __init__(self, client, source, **kwargs): """ @@ -61,27 +61,27 @@ def __init__(self, client, source, **kwargs): auto_reconnect = kwargs.get("auto_reconnect", True) super(EventHubConsumer, self).__init__() - self.running = False - self.client = client - self.source = source - self.offset = event_position - self.messages_iter = None - self.prefetch = prefetch - self.owner_level = owner_level - self.keep_alive = keep_alive - self.auto_reconnect = auto_reconnect - self.retry_policy = errors.ErrorPolicy(max_retries=self.client.config.max_retries, on_error=_error_handler) - self.reconnect_backoff = 1 + self._running = False + self._client = client + self._source = source + self._offset = event_position + self._messages_iter = None + self._prefetch = prefetch + self._owner_level = owner_level + self._keep_alive = keep_alive + self._auto_reconnect = auto_reconnect + self._retry_policy = errors.ErrorPolicy(max_retries=self._client._config.max_retries, on_error=_error_handler) # pylint:disable=protected-access + self._reconnect_backoff = 1 self._link_properties = {} - self.redirected = None - self.error = None - partition = self.source.split('/')[-1] - self.partition = partition - self.name = "EHConsumer-{}-partition{}".format(uuid.uuid4(), partition) + self._redirected = None + self._error = None + partition = self._source.split('/')[-1] + self._partition = partition + self._name = "EHConsumer-{}-partition{}".format(uuid.uuid4(), partition) if owner_level: - self._link_properties[types.AMQPSymbol(self._epoch)] = types.AMQPLong(int(owner_level)) - link_property_timeout_ms = (self.client.config.receive_timeout or self.timeout) * 1000 - self._link_properties[types.AMQPSymbol(self._timeout)] = types.AMQPLong(int(link_property_timeout_ms)) + 
self._link_properties[types.AMQPSymbol(self._epoch_symbol)] = types.AMQPLong(int(owner_level)) + link_property_timeout_ms = (self._client._config.receive_timeout or self._timeout) * 1000 # pylint:disable=protected-access + self._link_properties[types.AMQPSymbol(self._timeout_symbol)] = types.AMQPLong(int(link_property_timeout_ms)) self._handler = None def __iter__(self): @@ -89,47 +89,47 @@ def __iter__(self): def __next__(self): retried_times = 0 - while retried_times < self.client.config.max_retries: + while retried_times < self._client._config.max_retries: # pylint:disable=protected-access try: self._open() - if not self.messages_iter: - self.messages_iter = self._handler.receive_messages_iter() - message = next(self.messages_iter) + if not self._messages_iter: + self._messages_iter = self._handler.receive_messages_iter() + message = next(self._messages_iter) event_data = EventData._from_message(message) # pylint:disable=protected-access - self.offset = EventPosition(event_data.offset, inclusive=False) + self._offset = EventPosition(event_data.offset, inclusive=False) retried_times = 0 return event_data except Exception as exception: # pylint:disable=broad-except last_exception = self._handle_exception(exception) - self.client._try_delay(retried_times=retried_times, last_exception=last_exception, - entity_name=self.name) + self._client._try_delay(retried_times=retried_times, last_exception=last_exception, # pylint:disable=protected-access + entity_name=self._name) retried_times += 1 def _create_handler(self): alt_creds = { - "username": self.client._auth_config.get("iot_username") if self.redirected else None, # pylint:disable=protected-access - "password": self.client._auth_config.get("iot_password") if self.redirected else None # pylint:disable=protected-access + "username": self._client._auth_config.get("iot_username") if self._redirected else None, # pylint:disable=protected-access + "password": self._client._auth_config.get("iot_password") if 
self._redirected else None # pylint:disable=protected-access } - source = Source(self.source) - if self.offset is not None: - source.set_filter(self.offset._selector()) # pylint:disable=protected-access + source = Source(self._source) + if self._offset is not None: + source.set_filter(self._offset._selector()) # pylint:disable=protected-access self._handler = ReceiveClient( source, - auth=self.client.get_auth(**alt_creds), - debug=self.client.config.network_tracing, - prefetch=self.prefetch, + auth=self._client._get_auth(**alt_creds), # pylint:disable=protected-access + debug=self._client._config.network_tracing, # pylint:disable=protected-access + prefetch=self._prefetch, link_properties=self._link_properties, - timeout=self.timeout, - error_policy=self.retry_policy, - keep_alive_interval=self.keep_alive, - client_name=self.name, - properties=self.client._create_properties( # pylint:disable=protected-access - self.client.config.user_agent)) - self.messages_iter = None + timeout=self._timeout, + error_policy=self._retry_policy, + keep_alive_interval=self._keep_alive, + client_name=self._name, + properties=self._client._create_properties( # pylint:disable=protected-access + self._client._config.user_agent)) # pylint:disable=protected-access + self._messages_iter = None def _redirect(self, redirect): - self.messages_iter = None + self._messages_iter = None super(EventHubConsumer, self)._redirect(redirect) def _open(self): @@ -140,11 +140,11 @@ def _open(self): """ # pylint: disable=protected-access - self.redirected = self.redirected or self.client._iothub_redirect_info + self._redirected = self._redirected or self._client._iothub_redirect_info - if not self.running and self.redirected: - self.client._process_redirect_uri(self.redirected) - self.source = self.redirected.address + if not self._running and self._redirected: + self._client._process_redirect_uri(self._redirected) + self._source = self._redirected.address super(EventHubConsumer, self)._open() def 
_open_with_retry(self): @@ -158,7 +158,7 @@ def _receive(self, timeout_time=None, max_batch_size=None, **kwargs): remaining_time = timeout_time - time.time() if remaining_time <= 0.0: if last_exception: - log.info("%r receive operation timed out. (%r)", self.name, last_exception) + log.info("%r receive operation timed out. (%r)", self._name, last_exception) raise last_exception return data_batch remaining_time_ms = 1000 * remaining_time @@ -167,7 +167,7 @@ def _receive(self, timeout_time=None, max_batch_size=None, **kwargs): timeout=remaining_time_ms) for message in message_batch: event_data = EventData._from_message(message) # pylint:disable=protected-access - self.offset = EventPosition(event_data.offset) + self._offset = EventPosition(event_data.offset) data_batch.append(event_data) return data_batch @@ -217,8 +217,8 @@ def receive(self, max_batch_size=None, timeout=None): """ self._check_closed() - timeout = timeout or self.client.config.receive_timeout - max_batch_size = max_batch_size or min(self.client.config.max_batch_size, self.prefetch) + timeout = timeout or self._client._config.receive_timeout # pylint:disable=protected-access + max_batch_size = max_batch_size or min(self._client._config.max_batch_size, self._prefetch) # pylint:disable=protected-access return self._receive_with_retry(timeout=timeout, max_batch_size=max_batch_size) @@ -242,9 +242,9 @@ def close(self, exception=None): :caption: Close down the handler. 
""" - if self.messages_iter: - self.messages_iter.close() - self.messages_iter = None + if self._messages_iter: + self._messages_iter.close() + self._messages_iter = None super(EventHubConsumer, self).close(exception) next = __next__ # for python2.7 diff --git a/sdk/eventhub/azure-eventhubs/azure/eventhub/error.py b/sdk/eventhub/azure-eventhubs/azure/eventhub/error.py index 6db54e5977de..129cf14a3842 100644 --- a/sdk/eventhub/azure-eventhubs/azure/eventhub/error.py +++ b/sdk/eventhub/azure-eventhubs/azure/eventhub/error.py @@ -2,7 +2,6 @@ # Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. See License.txt in the project root for license information. # -------------------------------------------------------------------------------------------- -import time import logging import six @@ -159,9 +158,9 @@ def _create_eventhub_exception(exception): def _handle_exception(exception, closable): # pylint:disable=too-many-branches, too-many-statements try: # closable is a producer/consumer object - name = closable.name + name = closable._name # pylint: disable=protected-access except AttributeError: # closable is an client object - name = closable.container_id + name = closable._container_id # pylint: disable=protected-access if isinstance(exception, KeyboardInterrupt): # pylint:disable=no-else-raise log.info("%r stops due to keyboard interrupt", name) closable.close() diff --git a/sdk/eventhub/azure-eventhubs/azure/eventhub/eventprocessor/event_processor.py b/sdk/eventhub/azure-eventhubs/azure/eventhub/eventprocessor/event_processor.py index 85020257df46..71741c56dffa 100644 --- a/sdk/eventhub/azure-eventhubs/azure/eventhub/eventprocessor/event_processor.py +++ b/sdk/eventhub/azure-eventhubs/azure/eventhub/eventprocessor/event_processor.py @@ -79,8 +79,8 @@ def __init__(self, eventhub_client: EventHubClient, consumer_group_name: str, self._partition_processor_factory = partition_processor_factory self._partition_manager = 
partition_manager self._initial_event_position = kwargs.get("initial_event_position", "-1") - self._max_batch_size = eventhub_client.config.max_batch_size - self._receive_timeout = eventhub_client.config.receive_timeout + self._max_batch_size = eventhub_client._config.max_batch_size + self._receive_timeout = eventhub_client._config.receive_timeout self._tasks = [] # type: List[asyncio.Task] self._id = str(uuid.uuid4()) diff --git a/sdk/eventhub/azure-eventhubs/azure/eventhub/producer.py b/sdk/eventhub/azure-eventhubs/azure/eventhub/producer.py index c019a30ee7b8..8008fac7ecd0 100644 --- a/sdk/eventhub/azure-eventhubs/azure/eventhub/producer.py +++ b/sdk/eventhub/azure-eventhubs/azure/eventhub/producer.py @@ -40,7 +40,7 @@ class EventHubProducer(ConsumerProducerMixin): # pylint:disable=too-many-instan to a partition. """ - _timeout = b'com.microsoft:timeout' + _timeout_symbol = b'com.microsoft:timeout' def __init__(self, client, target, **kwargs): """ @@ -71,38 +71,38 @@ def __init__(self, client, target, **kwargs): super(EventHubProducer, self).__init__() self._max_message_size_on_link = None - self.running = False - self.client = client - self.target = target - self.partition = partition - self.timeout = send_timeout - self.redirected = None - self.error = None - self.keep_alive = keep_alive - self.auto_reconnect = auto_reconnect - self.retry_policy = errors.ErrorPolicy(max_retries=self.client.config.max_retries, on_error=_error_handler) - self.reconnect_backoff = 1 - self.name = "EHProducer-{}".format(uuid.uuid4()) - self.unsent_events = None + self._running = False + self._client = client + self._target = target + self._partition = partition + self._timeout = send_timeout + self._redirected = None + self._error = None + self._keep_alive = keep_alive + self._auto_reconnect = auto_reconnect + self._retry_policy = errors.ErrorPolicy(max_retries=self._client._config.max_retries, on_error=_error_handler) # pylint: disable=protected-access + self._reconnect_backoff = 
1 + self._name = "EHProducer-{}".format(uuid.uuid4()) + self._unsent_events = None if partition: - self.target += "/Partitions/" + partition - self.name += "-partition{}".format(partition) + self._target += "/Partitions/" + partition + self._name += "-partition{}".format(partition) self._handler = None self._outcome = None self._condition = None - self._link_properties = {types.AMQPSymbol(self._timeout): types.AMQPLong(int(self.timeout * 1000))} + self._link_properties = {types.AMQPSymbol(self._timeout_symbol): types.AMQPLong(int(self._timeout * 1000))} def _create_handler(self): self._handler = SendClient( - self.target, - auth=self.client.get_auth(), - debug=self.client.config.network_tracing, - msg_timeout=self.timeout, - error_policy=self.retry_policy, - keep_alive_interval=self.keep_alive, - client_name=self.name, + self._target, + auth=self._client._get_auth(), # pylint:disable=protected-access + debug=self._client._config.network_tracing, # pylint:disable=protected-access + msg_timeout=self._timeout, + error_policy=self._retry_policy, + keep_alive_interval=self._keep_alive, + client_name=self._name, link_properties=self._link_properties, - properties=self.client._create_properties(self.client.config.user_agent)) # pylint: disable=protected-access + properties=self._client._create_properties(self._client._config.user_agent)) # pylint: disable=protected-access def _open(self): """ @@ -111,16 +111,16 @@ def _open(self): context will be used to create a new handler before opening it. 
""" - if not self.running and self.redirected: - self.client._process_redirect_uri(self.redirected) # pylint: disable=protected-access - self.target = self.redirected.address + if not self._running and self._redirected: + self._client._process_redirect_uri(self._redirected) # pylint: disable=protected-access + self._target = self._redirected.address super(EventHubProducer, self)._open() def _open_with_retry(self): return self._do_retryable_operation(self._open, operation_need_param=False) def _send_event_data(self, timeout_time=None, last_exception=None): - if self.unsent_events: + if self._unsent_events: self._open() remaining_time = timeout_time - time.time() if remaining_time <= 0.0: @@ -128,12 +128,12 @@ def _send_event_data(self, timeout_time=None, last_exception=None): error = last_exception else: error = OperationTimeoutError("send operation timed out") - log.info("%r send operation timed out. (%r)", self.name, error) + log.info("%r send operation timed out. (%r)", self._name, error) raise error self._handler._msg_timeout = remaining_time # pylint: disable=protected-access - self._handler.queue_message(*self.unsent_events) + self._handler.queue_message(*self._unsent_events) self._handler.wait() - self.unsent_events = self._handler.pending_messages + self._unsent_events = self._handler.pending_messages if self._outcome != constants.MessageSendResult.Ok: if self._outcome == constants.MessageSendResult.Timeout: self._condition = OperationTimeoutError("send operation timed out") @@ -233,7 +233,7 @@ def send(self, event_data, partition_key=None, timeout=None): event_data = _set_partition_key(event_data, partition_key) wrapper_event_data = EventDataBatch._from_batch(event_data, partition_key) # pylint: disable=protected-access wrapper_event_data.message.on_send_complete = self._on_outcome - self.unsent_events = [wrapper_event_data.message] + self._unsent_events = [wrapper_event_data.message] self._send_event_data_with_retry(timeout=timeout) def close(self, 
exception=None): # pylint:disable=useless-super-delegation diff --git a/sdk/eventhub/azure-eventhubs/tests/asynctests/test_iothub_receive_async.py b/sdk/eventhub/azure-eventhubs/tests/asynctests/test_iothub_receive_async.py index 9226a5d62a93..4ac63eef5d7f 100644 --- a/sdk/eventhub/azure-eventhubs/tests/asynctests/test_iothub_receive_async.py +++ b/sdk/eventhub/azure-eventhubs/tests/asynctests/test_iothub_receive_async.py @@ -16,7 +16,7 @@ async def pump(receiver, sleep=None): if sleep: await asyncio.sleep(sleep) async with receiver: - batch = await receiver.receive(timeout=3) + batch = await receiver.receive(timeout=10) messages += len(batch) return messages @@ -67,7 +67,7 @@ async def test_iothub_receive_after_mgmt_ops_async(iot_connection_str, device_id assert partitions == ["0", "1", "2", "3"] receiver = client.create_consumer(consumer_group="$default", partition_id=partitions[0], event_position=EventPosition("-1"), operation='/messages/events') async with receiver: - received = await receiver.receive(timeout=5) + received = await receiver.receive(timeout=10) assert len(received) == 0 @@ -77,7 +77,7 @@ async def test_iothub_mgmt_ops_after_receive_async(iot_connection_str, device_id client = EventHubClient.from_connection_string(iot_connection_str, network_tracing=False) receiver = client.create_consumer(consumer_group="$default", partition_id="0", event_position=EventPosition("-1"), operation='/messages/events') async with receiver: - received = await receiver.receive(timeout=5) + received = await receiver.receive(timeout=10) assert len(received) == 0 partitions = await client.get_partition_ids() diff --git a/sdk/eventhub/azure-eventhubs/tests/asynctests/test_longrunning_receive_async.py b/sdk/eventhub/azure-eventhubs/tests/asynctests/test_longrunning_receive_async.py index 900612684001..50ababacf738 100644 --- a/sdk/eventhub/azure-eventhubs/tests/asynctests/test_longrunning_receive_async.py +++ 
b/sdk/eventhub/azure-eventhubs/tests/asynctests/test_longrunning_receive_async.py @@ -72,7 +72,7 @@ async def pump(_pid, receiver, _args, _dl): total, batch[-1].sequence_number, batch[-1].offset)) - print("{}: Total received {}".format(receiver.partition, total)) + print("{}: Total received {}".format(receiver._partition, total)) except Exception as e: print("Partition {} receiver failed: {}".format(_pid, e)) raise diff --git a/sdk/eventhub/azure-eventhubs/tests/test_iothub_receive.py b/sdk/eventhub/azure-eventhubs/tests/test_iothub_receive.py index ac7e211bd736..595c822b9cb7 100644 --- a/sdk/eventhub/azure-eventhubs/tests/test_iothub_receive.py +++ b/sdk/eventhub/azure-eventhubs/tests/test_iothub_receive.py @@ -14,7 +14,7 @@ def test_iothub_receive_sync(iot_connection_str, device_id): client = EventHubClient.from_connection_string(iot_connection_str, network_tracing=False) receiver = client.create_consumer(consumer_group="$default", partition_id="0", event_position=EventPosition("-1"), operation='/messages/events') try: - received = receiver.receive(timeout=5) + received = receiver.receive(timeout=10) assert len(received) == 0 finally: receiver.close() @@ -48,7 +48,7 @@ def test_iothub_receive_after_mgmt_ops_sync(iot_connection_str, device_id): assert partitions == ["0", "1", "2", "3"] receiver = client.create_consumer(consumer_group="$default", partition_id=partitions[0], event_position=EventPosition("-1"), operation='/messages/events') with receiver: - received = receiver.receive(timeout=5) + received = receiver.receive(timeout=10) assert len(received) == 0 @@ -57,7 +57,7 @@ def test_iothub_mgmt_ops_after_receive_sync(iot_connection_str, device_id): client = EventHubClient.from_connection_string(iot_connection_str, network_tracing=False) receiver = client.create_consumer(consumer_group="$default", partition_id="0", event_position=EventPosition("-1"), operation='/messages/events') with receiver: - received = receiver.receive(timeout=5) + received = 
receiver.receive(timeout=10) assert len(received) == 0 partitions = client.get_partition_ids() diff --git a/sdk/eventhub/azure-eventhubs/tests/test_longrunning_receive.py b/sdk/eventhub/azure-eventhubs/tests/test_longrunning_receive.py index 47559b778af3..5a6e42a827e3 100644 --- a/sdk/eventhub/azure-eventhubs/tests/test_longrunning_receive.py +++ b/sdk/eventhub/azure-eventhubs/tests/test_longrunning_receive.py @@ -62,17 +62,17 @@ def pump(receiver, duration): iteration += 1 if size == 0: print("{}: No events received, queue size {}, delivered {}".format( - receiver.partition, + receiver._partition, receiver.queue_size, total)) elif iteration >= 5: iteration = 0 print("{}: total received {}, last sn={}, last offset={}".format( - receiver.partition, + receiver._partition, total, batch[-1].sequence_number, batch[-1].offset)) - print("{}: Total received {}".format(receiver.partition, total)) + print("{}: Total received {}".format(receiver._partition, total)) except Exception as e: print("EventHubConsumer failed: {}".format(e)) raise From f616f371cdc3ab798ba1f1ddc657ff7d331c15a2 Mon Sep 17 00:00:00 2001 From: "Adam Ling (MSFT)" <47871814+yunhaoling@users.noreply.github.com> Date: Fri, 6 Sep 2019 13:38:41 -0700 Subject: [PATCH 31/51] Update samples and codes according to the review (#7098) * Update samples and codes according to the review * Small update --- .../azure/eventhub/_consumer_producer_mixin.py | 9 ++++----- .../eventhub/aio/_consumer_producer_mixin_async.py | 9 ++++----- .../examples/async_examples/send_async.py | 6 ++---- sdk/eventhub/azure-eventhubs/examples/iothub_recv.py | 4 +--- sdk/eventhub/azure-eventhubs/examples/recv.py | 5 +---- sdk/eventhub/azure-eventhubs/examples/send.py | 11 ++++++----- .../azure-eventhubs/examples/send_event_data_batch.py | 4 +--- .../examples/send_list_of_event_data.py | 3 ++- 8 files changed, 21 insertions(+), 30 deletions(-) diff --git a/sdk/eventhub/azure-eventhubs/azure/eventhub/_consumer_producer_mixin.py 
b/sdk/eventhub/azure-eventhubs/azure/eventhub/_consumer_producer_mixin.py index ef32cb7a591c..a14da749ee78 100644 --- a/sdk/eventhub/azure-eventhubs/azure/eventhub/_consumer_producer_mixin.py +++ b/sdk/eventhub/azure-eventhubs/azure/eventhub/_consumer_producer_mixin.py @@ -80,11 +80,10 @@ def _handle_exception(self, exception): return _handle_exception(exception, self) - def _do_retryable_operation(self, operation, timeout=None, **kwargs): + def _do_retryable_operation(self, operation, timeout=100000, **kwargs): # pylint:disable=protected-access - if not timeout: - timeout = 100000 # timeout equals to 0 means no timeout, set the value to be a large number. - timeout_time = time.time() + timeout + timeout_time = time.time() + ( + timeout if timeout else 100000) # timeout equals to 0 means no timeout, set the value to be a large number. retried_times = 0 last_exception = kwargs.pop('last_exception', None) operation_need_param = kwargs.pop('operation_need_param', True) @@ -100,7 +99,7 @@ def _do_retryable_operation(self, operation, timeout=None, **kwargs): timeout_time=timeout_time, entity_name=self._name) retried_times += 1 - log.info("%r has exhausted retry. Exception still occurs (%r)", self._name, last_exception) + log.info("%r operation has exhausted retry. 
Last exception: %r.", self._name, last_exception) raise last_exception def close(self, exception=None): diff --git a/sdk/eventhub/azure-eventhubs/azure/eventhub/aio/_consumer_producer_mixin_async.py b/sdk/eventhub/azure-eventhubs/azure/eventhub/aio/_consumer_producer_mixin_async.py index 33c944d41be4..444edd15a8a1 100644 --- a/sdk/eventhub/azure-eventhubs/azure/eventhub/aio/_consumer_producer_mixin_async.py +++ b/sdk/eventhub/azure-eventhubs/azure/eventhub/aio/_consumer_producer_mixin_async.py @@ -81,11 +81,10 @@ async def _handle_exception(self, exception): return await _handle_exception(exception, self) - async def _do_retryable_operation(self, operation, timeout=None, **kwargs): + async def _do_retryable_operation(self, operation, timeout=100000, **kwargs): # pylint:disable=protected-access - if not timeout: - timeout = 100000 # timeout equals to 0 means no timeout, set the value to be a large number. - timeout_time = time.time() + timeout + timeout_time = time.time() + ( + timeout if timeout else 100000) # timeout equals to 0 means no timeout, set the value to be a large number. retried_times = 0 last_exception = kwargs.pop('last_exception', None) operation_need_param = kwargs.pop('operation_need_param', True) @@ -101,7 +100,7 @@ async def _do_retryable_operation(self, operation, timeout=None, **kwargs): timeout_time=timeout_time, entity_name=self._name) retried_times += 1 - log.info("%r has exhausted retry. Exception still occurs (%r)", self._name, last_exception) + log.info("%r operation has exhausted retry. 
Last exception: %r.", self._name, last_exception) raise last_exception async def close(self, exception=None): diff --git a/sdk/eventhub/azure-eventhubs/examples/async_examples/send_async.py b/sdk/eventhub/azure-eventhubs/examples/async_examples/send_async.py index ac9ad098fee5..d24e73d0bc17 100644 --- a/sdk/eventhub/azure-eventhubs/examples/async_examples/send_async.py +++ b/sdk/eventhub/azure-eventhubs/examples/async_examples/send_async.py @@ -6,7 +6,7 @@ # -------------------------------------------------------------------------------------------- """ -An example to show sending events asynchronously to an Event Hub with partition keys. +An example to show sending individual events asynchronously to an Event Hub. """ # pylint: disable=C0111 @@ -45,6 +45,4 @@ async def send(producer, count): run(client)) start_time = time.time() loop.run_until_complete(tasks) -end_time = time.time() -run_time = end_time - start_time -print("Runtime: {} seconds".format(run_time)) +print("Runtime: {} seconds".format(time.time() - start_time)) diff --git a/sdk/eventhub/azure-eventhubs/examples/iothub_recv.py b/sdk/eventhub/azure-eventhubs/examples/iothub_recv.py index 0542f9c82b9c..ecc935669d13 100644 --- a/sdk/eventhub/azure-eventhubs/examples/iothub_recv.py +++ b/sdk/eventhub/azure-eventhubs/examples/iothub_recv.py @@ -17,9 +17,7 @@ client = EventHubClient.from_connection_string(iot_connection_str, network_tracing=False) consumer = client.create_consumer(consumer_group="$default", partition_id="0", event_position=EventPosition("-1"), operation='/messages/events') + with consumer: received = consumer.receive(timeout=5) print(received) - -eh_info = client.get_properties() -print(eh_info) diff --git a/sdk/eventhub/azure-eventhubs/examples/recv.py b/sdk/eventhub/azure-eventhubs/examples/recv.py index 68f0b5c214e8..11ed8747fc22 100644 --- a/sdk/eventhub/azure-eventhubs/examples/recv.py +++ b/sdk/eventhub/azure-eventhubs/examples/recv.py @@ -41,7 +41,4 @@ print(event_data.body_as_str()) 
total += 1 batch = consumer.receive(timeout=5) - - end_time = time.time() - run_time = end_time - start_time - print("Received {} messages in {} seconds".format(total, run_time)) + print("Received {} messages in {} seconds".format(total, time.time() - start_time)) diff --git a/sdk/eventhub/azure-eventhubs/examples/send.py b/sdk/eventhub/azure-eventhubs/examples/send.py index d559989c69a8..219d417447c1 100644 --- a/sdk/eventhub/azure-eventhubs/examples/send.py +++ b/sdk/eventhub/azure-eventhubs/examples/send.py @@ -6,9 +6,9 @@ # -------------------------------------------------------------------------------------------- """ -An example to show sending events to an Event Hub partition. -This is just an example of sending EventData, not performance optimal. -To have the best performance, send a batch EventData with one send() call. +An example to show sending individual events to an Event Hub partition. +Although this works, sending events in batches will get better performance. +See 'send_list_of_event_data.py' and 'send_event_data_batch.py' for an example of batching. """ # pylint: disable=C0111 @@ -26,10 +26,11 @@ client = EventHubClient(host=HOSTNAME, event_hub_path=EVENT_HUB, credential=EventHubSharedKeyCredential(USER, KEY), network_tracing=False) producer = client.create_producer(partition_id="0") + start_time = time.time() with producer: - # not performance optimal, but works. Please do send events in batch to get much better performance. for i in range(100): ed = EventData("msg") print("Sending message: {}".format(i)) - producer.send(ed) # please use batch_send for better performance. 
Refer to event_data_batch.py + producer.send(ed) +print("Send 100 messages in {} seconds".format(time.time() - start_time)) diff --git a/sdk/eventhub/azure-eventhubs/examples/send_event_data_batch.py b/sdk/eventhub/azure-eventhubs/examples/send_event_data_batch.py index 3ccaaf0a4fca..dfb7b8f3f749 100644 --- a/sdk/eventhub/azure-eventhubs/examples/send_event_data_batch.py +++ b/sdk/eventhub/azure-eventhubs/examples/send_event_data_batch.py @@ -42,6 +42,4 @@ def create_batch_data(producer): with producer: event_data_batch = create_batch_data(producer) producer.send(event_data_batch) -end_time = time.time() -run_time = end_time - start_time -print("Runtime: {} seconds".format(run_time)) +print("Runtime: {} seconds".format(time.time() - start_time)) diff --git a/sdk/eventhub/azure-eventhubs/examples/send_list_of_event_data.py b/sdk/eventhub/azure-eventhubs/examples/send_list_of_event_data.py index 74441d0f5bc3..715c220e6417 100644 --- a/sdk/eventhub/azure-eventhubs/examples/send_list_of_event_data.py +++ b/sdk/eventhub/azure-eventhubs/examples/send_list_of_event_data.py @@ -28,6 +28,7 @@ event_list = [] for i in range(1500): event_list.append(EventData('Hello World')) +start_time = time.time() with producer: - start_time = time.time() producer.send(event_list) +print("Runtime: {} seconds".format(time.time() - start_time)) From dad5baa2db1e8b80f277346d94b3c79722c61f29 Mon Sep 17 00:00:00 2001 From: Yijun Xie <48257664+YijunXieMS@users.noreply.github.com> Date: Fri, 6 Sep 2019 21:40:24 -0700 Subject: [PATCH 32/51] Python EventHubs load balancing (#6901) * Draft EventProcessor Loadbalancing * EventProcessor Load balancing * small changes from bryan's review * remove checkpoint manager from initialize * small changes * Draft EventProcessor Loadbalancing * EventProcessor Load balancing * small changes from bryan's review * remove checkpoint manager from initialize * small changes * Fix code review feedback * Packaging update of azure-mgmt-datalake-analytics * 
Packaging update of azure-loganalytics * Packaging update of azure-mgmt-storage * code review fixes and pylint error * reduce dictionary access * Revert "Packaging update of azure-mgmt-storage" This reverts commit cf22c7c2e6f83d4e31a741c190ab63e19d417cc2. * Revert "Packaging update of azure-loganalytics" This reverts commit 40c7f03b3940069479340effe10afde00c41cbb4. * Revert "Packaging update of azure-mgmt-datalake-analytics" This reverts commit c126bea5d4053c7c720dea12e8826e0f36f0a5e5. * Trivial code change * Refine exception handling for eventprocessor * Enable pylint for eventprocessor * Expose OwnershipLostError * Move eventprocessor to aio rename Sqlite3PartitionManager to SamplePartitionManager * change checkpoint_manager to partition context * fix pylint error * fix a small issue * Catch list_ownership/claim_ownership exceptions and retry * Fix code review issues * fix event processor long running test * Remove utils.py * Remove close() method * Updated docstrings * add pytest * small fixes * Revert "Remove utils.py" This reverts commit a9446de31a6f33e8c86aeec0410c8fbb182f3188. 
* change asyncio.create_task to 3.5 friendly code * Remove Callable * raise CancelledError instead of break --- pylintrc | 4 +- .../{ => aio}/eventprocessor/__init__.py | 11 +- .../aio/eventprocessor/_ownership_manager.py | 133 ++++++++ .../aio/eventprocessor/event_processor.py | 271 +++++++++++++++ .../eventprocessor/partition_context.py} | 21 +- .../eventprocessor/partition_manager.py | 25 +- .../eventprocessor/partition_processor.py | 34 +- .../sample_partition_manager.py | 144 ++++++++ .../{ => aio}/eventprocessor/utils.py | 2 +- .../eventprocessor/event_processor.py | 218 ------------ .../sqlite3_partition_manager.py | 110 ------- .../eventprocessor/event_processor_example.py | 41 +-- .../test_long_running_eventprocessor.py | 20 +- .../test_eventprocessor.py | 311 ++++++++++++++++++ 14 files changed, 943 insertions(+), 402 deletions(-) rename sdk/eventhub/azure-eventhubs/azure/eventhub/{ => aio}/eventprocessor/__init__.py (67%) create mode 100644 sdk/eventhub/azure-eventhubs/azure/eventhub/aio/eventprocessor/_ownership_manager.py create mode 100644 sdk/eventhub/azure-eventhubs/azure/eventhub/aio/eventprocessor/event_processor.py rename sdk/eventhub/azure-eventhubs/azure/eventhub/{eventprocessor/checkpoint_manager.py => aio/eventprocessor/partition_context.py} (61%) rename sdk/eventhub/azure-eventhubs/azure/eventhub/{ => aio}/eventprocessor/partition_manager.py (83%) rename sdk/eventhub/azure-eventhubs/azure/eventhub/{ => aio}/eventprocessor/partition_processor.py (52%) create mode 100644 sdk/eventhub/azure-eventhubs/azure/eventhub/aio/eventprocessor/sample_partition_manager.py rename sdk/eventhub/azure-eventhubs/azure/eventhub/{ => aio}/eventprocessor/utils.py (96%) delete mode 100644 sdk/eventhub/azure-eventhubs/azure/eventhub/eventprocessor/event_processor.py delete mode 100644 sdk/eventhub/azure-eventhubs/azure/eventhub/eventprocessor/sqlite3_partition_manager.py create mode 100644 
sdk/eventhub/azure-eventhubs/tests/eventprocessor_tests/test_eventprocessor.py diff --git a/pylintrc b/pylintrc index c5cddb6d9030..399344e8ea99 100644 --- a/pylintrc +++ b/pylintrc @@ -2,8 +2,8 @@ ignore-patterns=test_*,conftest,setup reports=no -# PYLINT DIRECTORY BLACKLIST. Ignore eventprocessor temporarily until new eventprocessor code is merged to master -ignore=_generated,samples,examples,test,tests,doc,.tox,eventprocessor +# PYLINT DIRECTORY BLACKLIST. +ignore=_generated,samples,examples,test,tests,doc,.tox init-hook='import sys; sys.path.insert(0, os.path.abspath(os.getcwd().rsplit("azure-sdk-for-python", 1)[0] + "azure-sdk-for-python/scripts/pylint_custom_plugin"))' load-plugins=pylint_guidelines_checker diff --git a/sdk/eventhub/azure-eventhubs/azure/eventhub/eventprocessor/__init__.py b/sdk/eventhub/azure-eventhubs/azure/eventhub/aio/eventprocessor/__init__.py similarity index 67% rename from sdk/eventhub/azure-eventhubs/azure/eventhub/eventprocessor/__init__.py rename to sdk/eventhub/azure-eventhubs/azure/eventhub/aio/eventprocessor/__init__.py index f4b48afac6f3..e3eefa4774f4 100644 --- a/sdk/eventhub/azure-eventhubs/azure/eventhub/eventprocessor/__init__.py +++ b/sdk/eventhub/azure-eventhubs/azure/eventhub/aio/eventprocessor/__init__.py @@ -5,13 +5,16 @@ from .event_processor import EventProcessor from .partition_processor import PartitionProcessor, CloseReason -from .partition_manager import PartitionManager -from .sqlite3_partition_manager import Sqlite3PartitionManager +from .partition_manager import PartitionManager, OwnershipLostError +from .partition_context import PartitionContext +from .sample_partition_manager import SamplePartitionManager __all__ = [ 'CloseReason', 'EventProcessor', 'PartitionProcessor', 'PartitionManager', - 'Sqlite3PartitionManager', -] \ No newline at end of file + 'OwnershipLostError', + 'PartitionContext', + 'SamplePartitionManager', +] diff --git 
a/sdk/eventhub/azure-eventhubs/azure/eventhub/aio/eventprocessor/_ownership_manager.py b/sdk/eventhub/azure-eventhubs/azure/eventhub/aio/eventprocessor/_ownership_manager.py new file mode 100644 index 000000000000..094ca8e0ce39 --- /dev/null +++ b/sdk/eventhub/azure-eventhubs/azure/eventhub/aio/eventprocessor/_ownership_manager.py @@ -0,0 +1,133 @@ +# -------------------------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# ----------------------------------------------------------------------------------- + +import time +import random +import math +from typing import List +from collections import Counter, defaultdict +from azure.eventhub.aio import EventHubClient +from .partition_manager import PartitionManager + + +class OwnershipManager(object): + """Increases or decreases the number of partitions owned by an EventProcessor + so the number of owned partitions are balanced among multiple EventProcessors + + An EventProcessor calls claim_ownership() of this class every x seconds, + where x is set by keyword argument "polling_interval" in EventProcessor, + to claim the ownership of partitions, create tasks for the claimed ownership, and cancel tasks that no longer belong + to the claimed ownership. + + """ + def __init__( + self, eventhub_client: EventHubClient, consumer_group_name: str, owner_id: str, + partition_manager: PartitionManager, ownership_timeout: float + ): + self.cached_parition_ids = [] # type: List[str] + self.eventhub_client = eventhub_client + self.eventhub_name = eventhub_client.eh_name + self.consumer_group_name = consumer_group_name + self.owner_id = owner_id + self.partition_manager = partition_manager + self.ownership_timeout = ownership_timeout + + async def claim_ownership(self): + """Claims ownership for this EventProcessor + 1. 
Retrieves all partition ids of an event hub from azure event hub service + 2. Retrieves current ownership list via this EventProcessor's PartitionManager. + 3. Balances number of ownership. Refer to _balance_ownership() for details. + 4. Claims the ownership for the balanced number of partitions. + + :return: List[Dict[Any]] + """ + if not self.cached_parition_ids: + await self._retrieve_partition_ids() + to_claim = await self._balance_ownership(self.cached_parition_ids) + claimed_list = await self.partition_manager.claim_ownership(to_claim) if to_claim else None + return claimed_list + + async def _retrieve_partition_ids(self): + """List all partition ids of the event hub that the EventProcessor is working on. + + :return: List[str] + """ + self.cached_parition_ids = await self.eventhub_client.get_partition_ids() + + async def _balance_ownership(self, all_partition_ids): + """Balances and claims ownership of partitions for this EventProcessor. + The balancing algorithm is: + 1. Find partitions with inactive ownership and partitions that haven never been claimed before + 2. Find the number of active owners, including this EventProcessor, for all partitions. + 3. Calculate the average count of partitions that an owner should own. + (number of partitions // number of active owners) + 4. Calculate the largest allowed count of partitions that an owner can own. + math.ceil(number of partitions / number of active owners). + This should be equal or 1 greater than the average count + 5. Adjust the number of partitions owned by this EventProcessor (owner) + a. if this EventProcessor owns more than largest allowed count, abandon one partition + b. if this EventProcessor owns less than average count, add one from the inactive or unclaimed partitions, + or steal one from another owner that has the largest number of ownership among all owners (EventProcessors) + c. 
Otherwise, no change to the ownership + + The balancing algorithm adjust one partition at a time to gradually build the balanced ownership. + Ownership must be renewed to keep it active. So the returned result includes both existing ownership and + the newly adjusted ownership. + This method balances but doesn't claim ownership. The caller of this method tries to claim the result ownership + list. But it may not successfully claim all of them because of concurrency. Other EventProcessors may happen to + claim a partition at that time. Since balancing and claiming are run in infinite repeatedly, + it achieves balancing among all EventProcessors after some time of running. + + :return: List[Dict[str, Any]], A list of ownership. + """ + ownership_list = await self.partition_manager.list_ownership( + self.eventhub_name, self.consumer_group_name + ) + now = time.time() + ownership_dict = {x["partition_id"]: x for x in ownership_list} # put the list to dict for fast lookup + not_owned_partition_ids = [pid for pid in all_partition_ids if pid not in ownership_dict] + timed_out_partition_ids = [ownership["partition_id"] for ownership in ownership_list + if ownership["last_modified_time"] + self.ownership_timeout < now] + claimable_partition_ids = not_owned_partition_ids + timed_out_partition_ids + active_ownership = [ownership for ownership in ownership_list + if ownership["last_modified_time"] + self.ownership_timeout >= now] + active_ownership_by_owner = defaultdict(list) + for ownership in active_ownership: + active_ownership_by_owner[ownership["owner_id"]].append(ownership) + active_ownership_self = active_ownership_by_owner[self.owner_id] + + # calculate expected count per owner + all_partition_count = len(all_partition_ids) + # owners_count is the number of active owners. If self.owner_id is not yet among the active owners, + # then plus 1 to include self. This will make owners_count >= 1. 
+ owners_count = len(active_ownership_by_owner) + \ + (0 if self.owner_id in active_ownership_by_owner else 1) + expected_count_per_owner = all_partition_count // owners_count + most_count_allowed_per_owner = math.ceil(all_partition_count / owners_count) + # end of calculating expected count per owner + + to_claim = active_ownership_self + if len(active_ownership_self) > most_count_allowed_per_owner: # needs to abandon a partition + to_claim.pop() # abandon one partition if owned too many + elif len(active_ownership_self) < expected_count_per_owner: + # Either claims an inactive partition, or steals from other owners + if claimable_partition_ids: # claim an inactive partition if there is + random_partition_id = random.choice(claimable_partition_ids) + random_chosen_to_claim = ownership_dict.get(random_partition_id, + {"partition_id": random_partition_id, + "eventhub_name": self.eventhub_name, + "consumer_group_name": self.consumer_group_name + }) + random_chosen_to_claim["owner_id"] = self.owner_id + to_claim.append(random_chosen_to_claim) + else: # steal from another owner that has the most count + active_ownership_count_group_by_owner = Counter( + dict((x, len(y)) for x, y in active_ownership_by_owner.items())) + most_frequent_owner_id = active_ownership_count_group_by_owner.most_common(1)[0][0] + # randomly choose a partition to steal from the most_frequent_owner + to_steal_partition = random.choice(active_ownership_by_owner[most_frequent_owner_id]) + to_steal_partition["owner_id"] = self.owner_id + to_claim.append(to_steal_partition) + return to_claim diff --git a/sdk/eventhub/azure-eventhubs/azure/eventhub/aio/eventprocessor/event_processor.py b/sdk/eventhub/azure-eventhubs/azure/eventhub/aio/eventprocessor/event_processor.py new file mode 100644 index 000000000000..37f9a20d67c5 --- /dev/null +++ b/sdk/eventhub/azure-eventhubs/azure/eventhub/aio/eventprocessor/event_processor.py @@ -0,0 +1,271 @@ +# 
-------------------------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# ----------------------------------------------------------------------------------- + +from typing import Dict, Type +import uuid +import asyncio +import logging + +from azure.eventhub import EventPosition, EventHubError +from azure.eventhub.aio import EventHubClient +from .partition_context import PartitionContext +from .partition_manager import PartitionManager, OwnershipLostError +from ._ownership_manager import OwnershipManager +from .partition_processor import CloseReason, PartitionProcessor +from .utils import get_running_loop + +log = logging.getLogger(__name__) + +OWNER_LEVEL = 0 + + +class EventProcessor(object): # pylint:disable=too-many-instance-attributes + """ + An EventProcessor constantly receives events from all partitions of the Event Hub in the context of a given + consumer group. The received data will be sent to PartitionProcessor to be processed. + + It provides the user a convenient way to receive events from multiple partitions and save checkpoints. + If multiple EventProcessors are running for an event hub, they will automatically balance load. + + Example: + .. code-block:: python + + import asyncio + import logging + import os + from azure.eventhub.aio import EventHubClient + from azure.eventhub.aio.eventprocessor import EventProcessor, PartitionProcessor + from azure.eventhub.aio.eventprocessor import SamplePartitionManager + + RECEIVE_TIMEOUT = 5 # timeout in seconds for a receiving operation. 0 or None means no timeout + RETRY_TOTAL = 3 # max number of retries for receive operations within the receive timeout. 
+ # Actual number of retries clould be less if RECEIVE_TIMEOUT is too small + CONNECTION_STR = os.environ["EVENT_HUB_CONN_STR"] + + logging.basicConfig(level=logging.INFO) + + async def do_operation(event): + # do some sync or async operations. If the operation is i/o bound, async will have better performance + print(event) + + + class MyPartitionProcessor(PartitionProcessor): + async def process_events(self, events, partition_context): + if events: + await asyncio.gather(*[do_operation(event) for event in events]) + await partition_context.update_checkpoint(events[-1].offset, events[-1].sequence_number) + + async def main(): + client = EventHubClient.from_connection_string(CONNECTION_STR, receive_timeout=RECEIVE_TIMEOUT, + retry_total=RETRY_TOTAL) + partition_manager = SamplePartitionManager(db_filename=":memory:") # a filename to persist checkpoint + try: + event_processor = EventProcessor(client, "$default", MyPartitionProcessor, + partition_manager, polling_interval=10) + asyncio.create_task(event_processor.start()) + await asyncio.sleep(60) + await event_processor.stop() + finally: + await partition_manager.close() + + if __name__ == '__main__': + asyncio.get_event_loop().run_until_complete(main()) + + """ + def __init__( + self, eventhub_client: EventHubClient, consumer_group_name: str, + partition_processor_type: Type[PartitionProcessor], + partition_manager: PartitionManager, *, + initial_event_position: EventPosition = EventPosition("-1"), polling_interval: float = 10.0 + ): + """ + Instantiate an EventProcessor. + + :param eventhub_client: An instance of ~azure.eventhub.aio.EventClient object + :type eventhub_client: ~azure.eventhub.aio.EventClient + :param consumer_group_name: The name of the consumer group this event processor is associated with. Events will + be read only in the context of this group. + :type consumer_group_name: str + :param partition_processor_type: A subclass type of ~azure.eventhub.eventprocessor.PartitionProcessor. 
+ :type partition_processor_type: type + :param partition_manager: Interacts with the storage system, dealing with ownership and checkpoints. + For an easy start, SamplePartitionManager comes with the package. + :type partition_manager: Class implementing the ~azure.eventhub.eventprocessor.PartitionManager. + :param initial_event_position: The event position to start a partition consumer. + if the partition has no checkpoint yet. This will be replaced by "reset" checkpoint in the near future. + :type initial_event_position: EventPosition + :param polling_interval: The interval between any two pollings of balancing and claiming + :type polling_interval: float + + """ + + self._consumer_group_name = consumer_group_name + self._eventhub_client = eventhub_client + self._eventhub_name = eventhub_client.eh_name + self._partition_processor_factory = partition_processor_type + self._partition_manager = partition_manager + self._initial_event_position = initial_event_position # will be replaced by reset event position in preview 4 + self._polling_interval = polling_interval + self._ownership_timeout = self._polling_interval * 2 + self._tasks = {} # type: Dict[str, asyncio.Task] + self._id = str(uuid.uuid4()) + self._running = False + + def __repr__(self): + return 'EventProcessor: id {}'.format(self._id) + + async def start(self): + """Start the EventProcessor. + + 1. Calls the OwnershipManager to keep claiming and balancing ownership of partitions in an + infinitely loop until self.stop() is called. + 2. Cancels tasks for partitions that are no longer owned by this EventProcessor + 3. Creates tasks for partitions that are newly claimed by this EventProcessor + 4. Keeps tasks running for partitions that haven't changed ownership + 5. 
Each task repeatedly calls EvenHubConsumer.receive() to retrieve events and + call user defined partition processor + + :return: None + + """ + log.info("EventProcessor %r is being started", self._id) + ownership_manager = OwnershipManager(self._eventhub_client, self._consumer_group_name, self._id, + self._partition_manager, self._ownership_timeout) + if not self._running: + self._running = True + while self._running: + try: + claimed_ownership_list = await ownership_manager.claim_ownership() + except Exception as err: + log.warning("An exception (%r) occurred during balancing and claiming ownership for eventhub %r " + "consumer group %r. Retrying after %r seconds", + err, self._eventhub_name, self._consumer_group_name, self._polling_interval) + await asyncio.sleep(self._polling_interval) + continue + + to_cancel_list = self._tasks.keys() + if claimed_ownership_list: + claimed_partition_ids = [x["partition_id"] for x in claimed_ownership_list] + to_cancel_list = self._tasks.keys() - claimed_partition_ids + self._create_tasks_for_claimed_ownership(claimed_ownership_list) + else: + log.info("EventProcessor %r hasn't claimed an ownership. It keeps claiming.", self._id) + if to_cancel_list: + self._cancel_tasks_for_partitions(to_cancel_list) + log.info("EventProcesor %r has cancelled partitions %r", self._id, to_cancel_list) + await asyncio.sleep(self._polling_interval) + + async def stop(self): + """Stop claiming ownership and all the partition consumers owned by this EventProcessor + + This method stops claiming ownership of owned partitions and cancels tasks that are running + EventHubConsumer.receive() for the partitions owned by this EventProcessor. + + :return: None + + """ + self._running = False + for _ in range(len(self._tasks)): + _, task = self._tasks.popitem() + task.cancel() + log.info("EventProcessor %r has been cancelled", self._id) + await asyncio.sleep(2) # give some time to finish after cancelled. 
+ + def _cancel_tasks_for_partitions(self, to_cancel_partitions): + for partition_id in to_cancel_partitions: + if partition_id in self._tasks: + task = self._tasks.pop(partition_id) + task.cancel() + + def _create_tasks_for_claimed_ownership(self, to_claim_ownership_list): + for ownership in to_claim_ownership_list: + partition_id = ownership["partition_id"] + if partition_id not in self._tasks: + self._tasks[partition_id] = get_running_loop().create_task(self._receive(ownership)) + + async def _receive(self, ownership): + log.info("start ownership, %r", ownership) + partition_processor = self._partition_processor_factory() + partition_id = ownership["partition_id"] + eventhub_name = ownership["eventhub_name"] + consumer_group_name = ownership["consumer_group_name"] + owner_id = ownership["owner_id"] + partition_context = PartitionContext( + eventhub_name, + consumer_group_name, + partition_id, + owner_id, + self._partition_manager + ) + partition_consumer = self._eventhub_client.create_consumer( + consumer_group_name, + partition_id, + EventPosition(ownership.get("offset", self._initial_event_position.value)) + ) + + async def process_error(err): + log.warning( + "PartitionProcessor of EventProcessor instance %r of eventhub %r partition %r consumer group %r" + " has met an error. The exception is %r.", + owner_id, eventhub_name, partition_id, consumer_group_name, err + ) + try: + await partition_processor.process_error(err, partition_context) + except Exception as err_again: # pylint:disable=broad-except + log.warning( + "PartitionProcessor of EventProcessor instance %r of eventhub %r partition %r consumer group %r" + " has another error during running process_error(). The exception is %r.", + owner_id, eventhub_name, partition_id, consumer_group_name, err_again + ) + + async def close(reason): + log.info( + "PartitionProcessor of EventProcessor instance %r of eventhub %r partition %r consumer group %r" + " is being closed. 
Reason is: %r", + owner_id, eventhub_name, partition_id, consumer_group_name, reason + ) + try: + await partition_processor.close(reason, partition_context) + except Exception as err: # pylint:disable=broad-except + log.warning( + "PartitionProcessor of EventProcessor instance %r of eventhub %r partition %r consumer group %r" + " has an error during running close(). The exception is %r.", + owner_id, eventhub_name, partition_id, consumer_group_name, err + ) + + try: + while True: + try: + await partition_processor.initialize(partition_context) + events = await partition_consumer.receive() + await partition_processor.process_events(events, partition_context) + except asyncio.CancelledError: + log.info( + "PartitionProcessor of EventProcessor instance %r of eventhub %r partition %r consumer group %r" + " is cancelled", + owner_id, + eventhub_name, + partition_id, + consumer_group_name + ) + if self._running is False: + await close(CloseReason.SHUTDOWN) + else: + await close(CloseReason.OWNERSHIP_LOST) + raise + except EventHubError as eh_err: + await process_error(eh_err) + await close(CloseReason.EVENTHUB_EXCEPTION) + # An EventProcessor will pick up this partition again after the ownership is released + break + except OwnershipLostError: + await close(CloseReason.OWNERSHIP_LOST) + break + except Exception as other_error: # pylint:disable=broad-except + await process_error(other_error) + await close(CloseReason.PROCESS_EVENTS_ERROR) + break + finally: + await partition_consumer.close() diff --git a/sdk/eventhub/azure-eventhubs/azure/eventhub/eventprocessor/checkpoint_manager.py b/sdk/eventhub/azure-eventhubs/azure/eventhub/aio/eventprocessor/partition_context.py similarity index 61% rename from sdk/eventhub/azure-eventhubs/azure/eventhub/eventprocessor/checkpoint_manager.py rename to sdk/eventhub/azure-eventhubs/azure/eventhub/aio/eventprocessor/partition_context.py index 2714f675b28c..6aaf939143a2 100644 --- 
a/sdk/eventhub/azure-eventhubs/azure/eventhub/eventprocessor/checkpoint_manager.py +++ b/sdk/eventhub/azure-eventhubs/azure/eventhub/aio/eventprocessor/partition_context.py @@ -7,30 +7,33 @@ from .partition_manager import PartitionManager -class CheckpointManager(object): - """ - CheckpointManager is responsible for the creation of checkpoints. - The interaction with the chosen storage service is done via ~azure.eventhub.eventprocessor.PartitionManager. +class PartitionContext(object): + """Contains partition related context information for a PartitionProcessor instance to use. + Users can use update_checkpoint() of this class to save checkpoint data. """ - def __init__(self, partition_id: str, eventhub_name: str, consumer_group_name: str, owner_id: str, partition_manager: PartitionManager): + def __init__(self, eventhub_name: str, consumer_group_name: str, + partition_id: str, owner_id: str, partition_manager: PartitionManager): self.partition_id = partition_id self.eventhub_name = eventhub_name self.consumer_group_name = consumer_group_name self.owner_id = owner_id - self.partition_manager = partition_manager + self._partition_manager = partition_manager async def update_checkpoint(self, offset, sequence_number=None): """ - Updates the checkpoint using the given information for the associated partition and consumer group in the chosen storage service. + Updates the checkpoint using the given information for the associated partition and consumer group in the + chosen storage service. :param offset: The offset of the ~azure.eventhub.EventData the new checkpoint will be associated with. :type offset: str - :param sequence_number: The sequence_number of the ~azure.eventhub.EventData the new checkpoint will be associated with. + :param sequence_number: The sequence_number of the ~azure.eventhub.EventData the new checkpoint will be + associated with. 
:type sequence_number: int :return: None """ - await self.partition_manager.update_checkpoint( + # TODO: whether change this method to accept event_data as well + await self._partition_manager.update_checkpoint( self.eventhub_name, self.consumer_group_name, self.partition_id, self.owner_id, offset, sequence_number ) diff --git a/sdk/eventhub/azure-eventhubs/azure/eventhub/eventprocessor/partition_manager.py b/sdk/eventhub/azure-eventhubs/azure/eventhub/aio/eventprocessor/partition_manager.py similarity index 83% rename from sdk/eventhub/azure-eventhubs/azure/eventhub/eventprocessor/partition_manager.py rename to sdk/eventhub/azure-eventhubs/azure/eventhub/aio/eventprocessor/partition_manager.py index e4ecb1bec824..4bb84779dd53 100644 --- a/sdk/eventhub/azure-eventhubs/azure/eventhub/eventprocessor/partition_manager.py +++ b/sdk/eventhub/azure-eventhubs/azure/eventhub/aio/eventprocessor/partition_manager.py @@ -34,15 +34,14 @@ async def list_ownership(self, eventhub_name: str, consumer_group_name: str) -> last_modified_time etag """ - pass @abstractmethod - async def claim_ownership(self, partitions: Iterable[Dict[str, Any]]) -> Iterable[Dict[str, Any]]: + async def claim_ownership(self, ownership_list: Iterable[Dict[str, Any]]) -> Iterable[Dict[str, Any]]: """ Tries to claim a list of specified ownership. - :param partitions: Iterable of dictionaries containing all the ownership to claim. - :type partitions: Iterable of dict + :param ownership_list: Iterable of dictionaries containing all the ownership to claim. 
+ :type ownership_list: Iterable of dict :return: Iterable of dictionaries containing the following partition ownership information: eventhub_name consumer_group_name @@ -54,13 +53,13 @@ async def claim_ownership(self, partitions: Iterable[Dict[str, Any]]) -> Iterabl last_modified_time etag """ - pass @abstractmethod async def update_checkpoint(self, eventhub_name, consumer_group_name, partition_id, owner_id, offset, sequence_number) -> None: """ - Updates the checkpoint using the given information for the associated partition and consumer group in the chosen storage service. + Updates the checkpoint using the given information for the associated partition and + consumer group in the chosen storage service. :param eventhub_name: The name of the specific Event Hub the ownership are associated with, relative to the Event Hubs namespace that contains it. @@ -73,11 +72,15 @@ async def update_checkpoint(self, eventhub_name, consumer_group_name, partition_ :type owner_id: str :param offset: The offset of the ~azure.eventhub.EventData the new checkpoint will be associated with. :type offset: str - :param sequence_number: The sequence_number of the ~azure.eventhub.EventData the new checkpoint will be associated with. + :param sequence_number: The sequence_number of the ~azure.eventhub.EventData the new checkpoint + will be associated with. 
:type sequence_number: int - :return: + :return: None + :raise: `OwnershipLostError`, `CheckpointError` """ - pass - async def close(self): - pass + +class OwnershipLostError(Exception): + """Raises when update_checkpoint detects the ownership has been lost + + """ diff --git a/sdk/eventhub/azure-eventhubs/azure/eventhub/eventprocessor/partition_processor.py b/sdk/eventhub/azure-eventhubs/azure/eventhub/aio/eventprocessor/partition_processor.py similarity index 52% rename from sdk/eventhub/azure-eventhubs/azure/eventhub/eventprocessor/partition_processor.py rename to sdk/eventhub/azure-eventhubs/azure/eventhub/aio/eventprocessor/partition_processor.py index 10aafc79c492..8b0fb2ca7e5c 100644 --- a/sdk/eventhub/azure-eventhubs/azure/eventhub/eventprocessor/partition_processor.py +++ b/sdk/eventhub/azure-eventhubs/azure/eventhub/aio/eventprocessor/partition_processor.py @@ -6,27 +6,33 @@ from typing import List from abc import ABC, abstractmethod from enum import Enum -from .checkpoint_manager import CheckpointManager - from azure.eventhub import EventData +from .partition_context import PartitionContext class CloseReason(Enum): SHUTDOWN = 0 # user call EventProcessor.stop() OWNERSHIP_LOST = 1 # lose the ownership of a partition. EVENTHUB_EXCEPTION = 2 # Exception happens during receiving events + PROCESS_EVENTS_ERROR = 3 # Exception happens during process_events class PartitionProcessor(ABC): """ PartitionProcessor processes events received from the Azure Event Hubs service. A single instance of a class - implementing this abstract class will be created for every partition the associated ~azure.eventhub.eventprocessor.EventProcessor owns. + implementing this abstract class will be created for every partition the associated + ~azure.eventhub.eventprocessor.EventProcessor owns. 
""" - def __init__(self, checkpoint_manager: CheckpointManager): - self._checkpoint_manager = checkpoint_manager - async def close(self, reason): + async def initialize(self, partition_context: PartitionContext): + """ + + :param partition_context: The context information of this partition. + :type partition_context: ~azure.eventhub.aio.eventprocessor.PartitionContext + """ + + async def close(self, reason, partition_context: PartitionContext): """Called when EventProcessor stops processing this PartitionProcessor. There are different reasons to trigger the PartitionProcessor to close. @@ -34,25 +40,31 @@ async def close(self, reason): :param reason: Reason for closing the PartitionProcessor. :type reason: ~azure.eventhub.eventprocessor.CloseReason + :param partition_context: The context information of this partition. + Use its method update_checkpoint to save checkpoint to the data store. + :type partition_context: ~azure.eventhub.aio.eventprocessor.PartitionContext """ - pass @abstractmethod - async def process_events(self, events: List[EventData]): + async def process_events(self, events: List[EventData], partition_context: PartitionContext): """Called when a batch of events have been received. :param events: Received events. :type events: list[~azure.eventhub.common.EventData] + :param partition_context: The context information of this partition. + Use its method update_checkpoint to save checkpoint to the data store. + :type partition_context: ~azure.eventhub.aio.eventprocessor.PartitionContext """ - pass - async def process_error(self, error): + async def process_error(self, error, partition_context: PartitionContext): """Called when an error happens :param error: The error that happens. :type error: Exception + :param partition_context: The context information of this partition. + Use its method update_checkpoint to save checkpoint to the data store. 
+ :type partition_context: ~azure.eventhub.aio.eventprocessor.PartitionContext """ - pass diff --git a/sdk/eventhub/azure-eventhubs/azure/eventhub/aio/eventprocessor/sample_partition_manager.py b/sdk/eventhub/azure-eventhubs/azure/eventhub/aio/eventprocessor/sample_partition_manager.py new file mode 100644 index 000000000000..82559fc8c274 --- /dev/null +++ b/sdk/eventhub/azure-eventhubs/azure/eventhub/aio/eventprocessor/sample_partition_manager.py @@ -0,0 +1,144 @@ +# -------------------------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# ----------------------------------------------------------------------------------- + +import time +import uuid +import sqlite3 +import logging +from azure.eventhub.aio.eventprocessor import PartitionManager, OwnershipLostError + +logger = logging.getLogger(__name__) + + +def _check_table_name(table_name: str): + for c in table_name: + if not (c.isalnum() or c == "_"): + raise ValueError("Table name \"{}\" is not in correct format".format(table_name)) + return table_name + + +class SamplePartitionManager(PartitionManager): + """An implementation of PartitionManager by using the sqlite3 in Python standard library. + Sqlite3 is a mini sql database that runs in memory or files. + Please don't use this PartitionManager for production use. 
+ + + """ + primary_keys_dict = {"eventhub_name": "text", "consumer_group_name": "text", "partition_id": "text"} + other_fields_dict = {"owner_id": "text", "owner_level": "integer", "sequence_number": "integer", "offset": "text", + "last_modified_time": "real", "etag": "text"} + checkpoint_fields = ["sequence_number", "offset"] + fields_dict = {**primary_keys_dict, **other_fields_dict} + primary_keys = list(primary_keys_dict.keys()) + other_fields = list(other_fields_dict.keys()) + fields = primary_keys + other_fields + + def __init__(self, db_filename: str = ":memory:", ownership_table: str = "ownership"): + """ + + :param db_filename: name of file that saves the sql data. + Sqlite3 will run in memory without a file when db_filename is ":memory:". + :param ownership_table: The table name of the sqlite3 database. + """ + super(SamplePartitionManager, self).__init__() + self.ownership_table = _check_table_name(ownership_table) + conn = sqlite3.connect(db_filename) + c = conn.cursor() + try: + sql = "create table if not exists " + _check_table_name(ownership_table)\ + + "("\ + + ",".join([x[0]+" "+x[1] for x in self.fields_dict.items()])\ + + ", constraint pk_ownership PRIMARY KEY ("\ + + ",".join(self.primary_keys)\ + + "))" + c.execute(sql) + finally: + c.close() + self.conn = conn + + async def list_ownership(self, eventhub_name, consumer_group_name): + cursor = self.conn.cursor() + try: + cursor.execute("select " + ",".join(self.fields) + + " from "+_check_table_name(self.ownership_table)+" where eventhub_name=? " + "and consumer_group_name=?", + (eventhub_name, consumer_group_name)) + return [dict(zip(self.fields, row)) for row in cursor.fetchall()] + finally: + cursor.close() + + async def claim_ownership(self, ownership_list): + result = [] + cursor = self.conn.cursor() + try: + for p in ownership_list: + cursor.execute("select etag from " + _check_table_name(self.ownership_table) + + " where "+ " and ".join([field+"=?" 
for field in self.primary_keys]), + tuple(p.get(field) for field in self.primary_keys)) + cursor_fetch = cursor.fetchall() + if not cursor_fetch: + p["last_modified_time"] = time.time() + p["etag"] = str(uuid.uuid4()) + try: + fields_without_checkpoint = list(filter(lambda x: x not in self.checkpoint_fields, self.fields)) + sql = "insert into " + _check_table_name(self.ownership_table) + " (" \ + + ",".join(fields_without_checkpoint) \ + + ") values (?,?,?,?,?,?,?)" + cursor.execute(sql, tuple(p.get(field) for field in fields_without_checkpoint)) + except sqlite3.OperationalError as op_err: + logger.info("EventProcessor %r failed to claim partition %r " + "because it was claimed by another EventProcessor at the same time. " + "The Sqlite3 exception is %r", p["owner_id"], p["partition_id"], op_err) + continue + else: + result.append(p) + else: + if p.get("etag") == cursor_fetch[0][0]: + p["last_modified_time"] = time.time() + p["etag"] = str(uuid.uuid4()) + other_fields_without_checkpoint = list( + filter(lambda x: x not in self.checkpoint_fields, self.other_fields) + ) + sql = "update " + _check_table_name(self.ownership_table) + " set "\ + + ','.join([field+"=?" for field in other_fields_without_checkpoint])\ + + " where "\ + + " and ".join([field+"=?" for field in self.primary_keys]) + + cursor.execute(sql, tuple(p.get(field) for field in other_fields_without_checkpoint) + + tuple(p.get(field) for field in self.primary_keys)) + result.append(p) + else: + logger.info("EventProcessor %r failed to claim partition %r " + "because it was claimed by another EventProcessor at the same time", p["owner_id"], + p["partition_id"]) + self.conn.commit() + return result + finally: + cursor.close() + + async def update_checkpoint(self, eventhub_name, consumer_group_name, partition_id, owner_id, + offset, sequence_number): + cursor = self.conn.cursor() + try: + cursor.execute("select owner_id from " + _check_table_name(self.ownership_table) + + " where eventhub_name=? 
and consumer_group_name=? and partition_id=?", + (eventhub_name, consumer_group_name, partition_id)) + cursor_fetch = cursor.fetchall() + if cursor_fetch and owner_id == cursor_fetch[0][0]: + cursor.execute("update " + _check_table_name(self.ownership_table) + + " set offset=?, sequence_number=? " + "where eventhub_name=? and consumer_group_name=? and partition_id=?", + (offset, sequence_number, eventhub_name, consumer_group_name, partition_id)) + self.conn.commit() + else: + logger.info("EventProcessor couldn't checkpoint to partition %r because it no longer has the ownership", + partition_id) + raise OwnershipLostError() + + finally: + cursor.close() + + async def close(self): + self.conn.close() diff --git a/sdk/eventhub/azure-eventhubs/azure/eventhub/eventprocessor/utils.py b/sdk/eventhub/azure-eventhubs/azure/eventhub/aio/eventprocessor/utils.py similarity index 96% rename from sdk/eventhub/azure-eventhubs/azure/eventhub/eventprocessor/utils.py rename to sdk/eventhub/azure-eventhubs/azure/eventhub/aio/eventprocessor/utils.py index 368cd8469f10..1d8add0f49a0 100644 --- a/sdk/eventhub/azure-eventhubs/azure/eventhub/eventprocessor/utils.py +++ b/sdk/eventhub/azure-eventhubs/azure/eventhub/aio/eventprocessor/utils.py @@ -10,7 +10,7 @@ def get_running_loop(): try: return asyncio.get_running_loop() except AttributeError: # 3.5 / 3.6 - loop = asyncio._get_running_loop() # pylint: disable=protected-access + loop = asyncio._get_running_loop() # pylint: disable=protected-access, no-member if loop is None: raise RuntimeError('No running event loop') return loop diff --git a/sdk/eventhub/azure-eventhubs/azure/eventhub/eventprocessor/event_processor.py b/sdk/eventhub/azure-eventhubs/azure/eventhub/eventprocessor/event_processor.py deleted file mode 100644 index 71741c56dffa..000000000000 --- a/sdk/eventhub/azure-eventhubs/azure/eventhub/eventprocessor/event_processor.py +++ /dev/null @@ -1,218 +0,0 @@ -# 
-------------------------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for license information. -# ----------------------------------------------------------------------------------- - -from typing import Callable, List -import uuid -import asyncio -import logging - -from azure.eventhub import EventPosition, EventHubError -from azure.eventhub.aio import EventHubClient -from .checkpoint_manager import CheckpointManager -from .partition_manager import PartitionManager -from .partition_processor import PartitionProcessor, CloseReason -from .utils import get_running_loop - -log = logging.getLogger(__name__) - -OWNER_LEVEL = 0 - - -class EventProcessor(object): - """ - An EventProcessor constantly receives events from all partitions of the Event Hub in the context of a given - consumer group. The received data will be sent to PartitionProcessor to be processed. - - It provides the user a convenient way to receive events from multiple partitions and save checkpoints. - If multiple EventProcessors are running for an event hub, they will automatically balance load. - This load balancing won't be available until preview 3. - - Example: - .. 
code-block:: python - - class MyPartitionProcessor(PartitionProcessor): - async def process_events(self, events): - if events: - # do something sync or async to process the events - await self._checkpoint_manager.update_checkpoint(events[-1].offset, events[-1].sequence_number) - - import asyncio - from azure.eventhub.aio import EventHubClient - from azure.eventhub.eventprocessor import EventProcessor, PartitionProcessor, Sqlite3PartitionManager - client = EventHubClient.from_connection_string("", receive_timeout=5, retry_total=3) - partition_manager = Sqlite3PartitionManager() - try: - event_processor = EventProcessor(client, "$default", MyPartitionProcessor, partition_manager) - asyncio.ensure_future(event_processor.start()) - await asyncio.sleep(100) # allow it to run 100 seconds - await event_processor.stop() - finally: - await partition_manager.close() - - """ - def __init__(self, eventhub_client: EventHubClient, consumer_group_name: str, - partition_processor_factory: Callable[[CheckpointManager], PartitionProcessor], - partition_manager: PartitionManager, **kwargs): - """ - Instantiate an EventProcessor. - - :param eventhub_client: An instance of ~azure.eventhub.aio.EventClient object - :type eventhub_client: ~azure.eventhub.aio.EventClient - :param consumer_group_name: The name of the consumer group this event processor is associated with. Events will - be read only in the context of this group. - :type consumer_group_name: str - :param partition_processor_factory: A callable(type or function) object that creates an instance of a class - implementing the ~azure.eventhub.eventprocessor.PartitionProcessor. - :type partition_processor_factory: callable object - :param partition_manager: Interacts with the storage system, dealing with ownership and checkpoints. - For preview 2, sample Sqlite3PartitionManager is provided. - :type partition_manager: Class implementing the ~azure.eventhub.eventprocessor.PartitionManager. 
- :param initial_event_position: The offset to start a partition consumer if the partition has no checkpoint yet. - :type initial_event_position: int or str - - """ - self._consumer_group_name = consumer_group_name - self._eventhub_client = eventhub_client - self._eventhub_name = eventhub_client.eh_name - self._partition_processor_factory = partition_processor_factory - self._partition_manager = partition_manager - self._initial_event_position = kwargs.get("initial_event_position", "-1") - self._max_batch_size = eventhub_client._config.max_batch_size - self._receive_timeout = eventhub_client._config.receive_timeout - self._tasks = [] # type: List[asyncio.Task] - self._id = str(uuid.uuid4()) - - def __repr__(self): - return 'EventProcessor: id {}'.format(self._id) - - async def start(self): - """Start the EventProcessor. - - 1. retrieve the partition ids from eventhubs. - 2. claim partition ownership of these partitions. - 3. repeatedly call EvenHubConsumer.receive() to retrieve events and call user defined PartitionProcessor.process_events(). - - :return: None - - """ - log.info("EventProcessor %r is being started", self._id) - partition_ids = await self._eventhub_client.get_partition_ids() - claimed_list = await self._claim_partitions(partition_ids) - await self._start_claimed_partitions(claimed_list) - - async def stop(self): - """Stop all the partition consumer - - This method cancels tasks that are running EventHubConsumer.receive() for the partitions owned by this EventProcessor. 
- - :return: None - - """ - for i in range(len(self._tasks)): - task = self._tasks.pop() - task.cancel() - log.info("EventProcessor %r has been cancelled", self._id) - await asyncio.sleep(2) # give some time to finish after cancelled - - async def _claim_partitions(self, partition_ids): - partitions_ownership = await self._partition_manager.list_ownership(self._eventhub_name, self._consumer_group_name) - partitions_ownership_dict = dict() - for ownership in partitions_ownership: - partitions_ownership_dict[ownership["partition_id"]] = ownership - - to_claim_list = [] - for pid in partition_ids: - p_ownership = partitions_ownership_dict.get(pid) - if p_ownership: - to_claim_list.append(p_ownership) - else: - new_ownership = {"eventhub_name": self._eventhub_name, "consumer_group_name": self._consumer_group_name, - "owner_id": self._id, "partition_id": pid, "owner_level": OWNER_LEVEL} - to_claim_list.append(new_ownership) - claimed_list = await self._partition_manager.claim_ownership(to_claim_list) - return claimed_list - - async def _start_claimed_partitions(self, claimed_partitions): - for partition in claimed_partitions: - partition_id = partition["partition_id"] - offset = partition.get("offset", self._initial_event_position) - consumer = self._eventhub_client.create_consumer(self._consumer_group_name, partition_id, - EventPosition(str(offset))) - partition_processor = self._partition_processor_factory( - checkpoint_manager=CheckpointManager(partition_id, self._eventhub_name, self._consumer_group_name, - self._id, self._partition_manager) - ) - loop = get_running_loop() - task = loop.create_task( - _receive(consumer, partition_processor, self._receive_timeout)) - self._tasks.append(task) - try: - await asyncio.gather(*self._tasks) - finally: - log.info("EventProcessor %r has stopped", self._id) - - -async def _receive(partition_consumer, partition_processor, receive_timeout): - try: - while True: - try: - events = await 
partition_consumer.receive(timeout=receive_timeout) - except asyncio.CancelledError as cancelled_error: - log.info( - "PartitionProcessor of EventProcessor instance %r of eventhub %r partition %r consumer group %r " - "is cancelled", - partition_processor._checkpoint_manager.owner_id, - partition_processor._checkpoint_manager.eventhub_name, - partition_processor._checkpoint_manager.partition_id, - partition_processor._checkpoint_manager.consumer_group_name - ) - await partition_processor.process_error(cancelled_error) - await partition_processor.close(reason=CloseReason.SHUTDOWN) - break - except EventHubError as eh_err: - reason = CloseReason.LEASE_LOST if eh_err.error == "link:stolen" else CloseReason.EVENTHUB_EXCEPTION - log.warning( - "PartitionProcessor of EventProcessor instance %r of eventhub %r partition %r consumer group %r " - "has met an exception receiving events. It's being closed. The exception is %r.", - partition_processor._checkpoint_manager.owner_id, - partition_processor._checkpoint_manager.eventhub_name, - partition_processor._checkpoint_manager.partition_id, - partition_processor._checkpoint_manager.consumer_group_name, - eh_err - ) - await partition_processor.process_error(eh_err) - await partition_processor.close(reason=reason) - break - try: - await partition_processor.process_events(events) - except asyncio.CancelledError as cancelled_error: - log.info( - "PartitionProcessor of EventProcessor instance %r of eventhub %r partition %r consumer group %r " - "is cancelled.", - partition_processor._checkpoint_manager.owner_id, - partition_processor._checkpoint_manager.eventhub_name, - partition_processor._checkpoint_manager.partition_id, - partition_processor._checkpoint_manager.consumer_group_name - ) - await partition_processor.process_error(cancelled_error) - await partition_processor.close(reason=CloseReason.SHUTDOWN) - break - except Exception as exp: # user code has caused an error - log.warning( - "PartitionProcessor of EventProcessor 
instance %r of eventhub %r partition %r consumer group %r " - "has met an exception from user code process_events. It's being closed. The exception is %r.", - partition_processor._checkpoint_manager.owner_id, - partition_processor._checkpoint_manager.eventhub_name, - partition_processor._checkpoint_manager.partition_id, - partition_processor._checkpoint_manager.consumer_group_name, - exp - ) - await partition_processor.process_error(exp) - await partition_processor.close(reason=CloseReason.EVENTHUB_EXCEPTION) - break - # TODO: will review whether to break and close partition processor after user's code has an exception - # TODO: try to inform other EventProcessors to take the partition when this partition is closed in preview 3? - finally: - await partition_consumer.close() diff --git a/sdk/eventhub/azure-eventhubs/azure/eventhub/eventprocessor/sqlite3_partition_manager.py b/sdk/eventhub/azure-eventhubs/azure/eventhub/eventprocessor/sqlite3_partition_manager.py deleted file mode 100644 index eb08e970fa89..000000000000 --- a/sdk/eventhub/azure-eventhubs/azure/eventhub/eventprocessor/sqlite3_partition_manager.py +++ /dev/null @@ -1,110 +0,0 @@ -# -------------------------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for license information. -# ----------------------------------------------------------------------------------- - -import time -import uuid -import sqlite3 -from .partition_manager import PartitionManager - - -def _check_table_name(table_name: str): - for c in table_name: - if not (c.isalnum() or c == "_"): - raise ValueError("Table name \"{}\" is not in correct format".format(table_name)) - return table_name - - -class Sqlite3PartitionManager(PartitionManager): - """An implementation of PartitionManager by using the sqlite3 in Python standard library. 
- Sqlite3 is a mini sql database that runs in memory or files. - - - """ - def __init__(self, db_filename: str = ":memory:", ownership_table: str = "ownership"): - """ - - :param db_filename: name of file that saves the sql data. - Sqlite3 will run in memory without a file when db_filename is ":memory:". - :param ownership_table: The table name of the sqlite3 database. - """ - super(Sqlite3PartitionManager, self).__init__() - self.ownership_table = _check_table_name(ownership_table) - conn = sqlite3.connect(db_filename) - c = conn.cursor() - try: - c.execute("create table " + ownership_table + - "(eventhub_name text," - "consumer_group_name text," - "owner_id text," - "partition_id text," - "owner_level integer," - "sequence_number integer," - "offset text," - "last_modified_time integer," - "etag text)") - except sqlite3.OperationalError: - pass - finally: - c.close() - self.conn = conn - - async def list_ownership(self, eventhub_name, consumer_group_name): - cursor = self.conn.cursor() - try: - fields = ["eventhub_name", "consumer_group_name", "owner_id", "partition_id", "owner_level", - "sequence_number", - "offset", "last_modified_time", "etag"] - cursor.execute("select " + ",".join(fields) + - " from "+_check_table_name(self.ownership_table)+" where eventhub_name=? " - "and consumer_group_name=?", - (eventhub_name, consumer_group_name)) - result_list = [] - - for row in cursor.fetchall(): - d = dict(zip(fields, row)) - result_list.append(d) - return result_list - finally: - cursor.close() - - async def claim_ownership(self, partitions): - cursor = self.conn.cursor() - try: - for p in partitions: - cursor.execute("select * from " + _check_table_name(self.ownership_table) + - " where eventhub_name=? " - "and consumer_group_name=? 
" - "and partition_id =?", - (p["eventhub_name"], p["consumer_group_name"], - p["partition_id"])) - if not cursor.fetchall(): - cursor.execute("insert into " + _check_table_name(self.ownership_table) + - " (eventhub_name,consumer_group_name,partition_id,owner_id,owner_level,last_modified_time,etag) " - "values (?,?,?,?,?,?,?)", - (p["eventhub_name"], p["consumer_group_name"], p["partition_id"], p["owner_id"], p["owner_level"], - time.time(), str(uuid.uuid4()) - )) - else: - cursor.execute("update " + _check_table_name(self.ownership_table) + " set owner_id=?, owner_level=?, last_modified_time=?, etag=? " - "where eventhub_name=? and consumer_group_name=? and partition_id=?", - (p["owner_id"], p["owner_level"], time.time(), str(uuid.uuid4()), - p["eventhub_name"], p["consumer_group_name"], p["partition_id"])) - self.conn.commit() - return partitions - finally: - cursor.close() - - async def update_checkpoint(self, eventhub_name, consumer_group_name, partition_id, owner_id, - offset, sequence_number): - cursor = self.conn.cursor() - try: - cursor.execute("update " + _check_table_name(self.ownership_table) + " set offset=?, sequence_number=? where eventhub_name=? and consumer_group_name=? 
and partition_id=?", - (offset, sequence_number, eventhub_name, consumer_group_name, partition_id)) - self.conn.commit() - finally: - cursor.close() - - async def close(self): - self.conn.close() diff --git a/sdk/eventhub/azure-eventhubs/examples/eventprocessor/event_processor_example.py b/sdk/eventhub/azure-eventhubs/examples/eventprocessor/event_processor_example.py index 8c4c9ced7d29..c0826e274704 100644 --- a/sdk/eventhub/azure-eventhubs/examples/eventprocessor/event_processor_example.py +++ b/sdk/eventhub/azure-eventhubs/examples/eventprocessor/event_processor_example.py @@ -2,9 +2,8 @@ import logging import os from azure.eventhub.aio import EventHubClient -from azure.eventhub.eventprocessor import EventProcessor -from azure.eventhub.eventprocessor import PartitionProcessor -from azure.eventhub.eventprocessor import Sqlite3PartitionManager +from azure.eventhub.aio.eventprocessor import EventProcessor, PartitionProcessor +from azure.eventhub.aio.eventprocessor import SamplePartitionManager RECEIVE_TIMEOUT = 5 # timeout in seconds for a receiving operation. 0 or None means no timeout RETRY_TOTAL = 3 # max number of retries for receive operations within the receive timeout. 
Actual number of retries clould be less if RECEIVE_TIMEOUT is too small @@ -19,32 +18,22 @@ async def do_operation(event): class MyPartitionProcessor(PartitionProcessor): - def __init__(self, checkpoint_manager): - super(MyPartitionProcessor, self).__init__(checkpoint_manager) - - async def process_events(self, events): + async def process_events(self, events, partition_context): if events: await asyncio.gather(*[do_operation(event) for event in events]) - await self._checkpoint_manager.update_checkpoint(events[-1].offset, events[-1].sequence_number) - - -def partition_processor_factory(checkpoint_manager): - return MyPartitionProcessor(checkpoint_manager) - - -async def run_awhile(duration): - client = EventHubClient.from_connection_string(CONNECTION_STR, receive_timeout=RECEIVE_TIMEOUT, - retry_total=RETRY_TOTAL) - partition_manager = Sqlite3PartitionManager() - event_processor = EventProcessor(client, "$default", MyPartitionProcessor, partition_manager) - try: - asyncio.ensure_future(event_processor.start()) - await asyncio.sleep(duration) - await event_processor.stop() - finally: - await partition_manager.close() + await partition_context.update_checkpoint(events[-1].offset, events[-1].sequence_number) + else: + print("empty events received", "partition:", partition_context.partition_id) if __name__ == '__main__': loop = asyncio.get_event_loop() - loop.run_until_complete(run_awhile(60)) + client = EventHubClient.from_connection_string(CONNECTION_STR, receive_timeout=RECEIVE_TIMEOUT, retry_total=RETRY_TOTAL) + partition_manager = SamplePartitionManager(db_filename="eventprocessor_test_db") + event_processor = EventProcessor(client, "$default", MyPartitionProcessor, partition_manager, polling_interval=1) + try: + loop.run_until_complete(event_processor.start()) + except KeyboardInterrupt: + loop.run_until_complete(event_processor.stop()) + finally: + loop.stop() diff --git a/sdk/eventhub/azure-eventhubs/tests/asynctests/test_long_running_eventprocessor.py 
b/sdk/eventhub/azure-eventhubs/tests/asynctests/test_long_running_eventprocessor.py index 741a521d8fef..1e3cae9eefa7 100644 --- a/sdk/eventhub/azure-eventhubs/tests/asynctests/test_long_running_eventprocessor.py +++ b/sdk/eventhub/azure-eventhubs/tests/asynctests/test_long_running_eventprocessor.py @@ -13,7 +13,7 @@ from logging.handlers import RotatingFileHandler from azure.eventhub.aio import EventHubClient -from azure.eventhub.eventprocessor import EventProcessor, PartitionProcessor, Sqlite3PartitionManager +from azure.eventhub.aio.eventprocessor import EventProcessor, PartitionProcessor, SamplePartitionManager from azure.eventhub import EventData @@ -44,23 +44,23 @@ def get_logger(filename, level=logging.INFO): class MyEventProcessor(PartitionProcessor): - async def close(self, reason): + async def close(self, reason, partition_context): logger.info("PartitionProcessor closed (reason {}, id {})".format( reason, - self._checkpoint_manager.partition_id + partition_context.partition_id )) - async def process_events(self, events): + async def process_events(self, events, partition_context): if events: event = events[-1] print("Processing id {}, offset {}, sq_number {})".format( - self._checkpoint_manager.partition_id, + partition_context.partition_id, event.offset, event.sequence_number)) - await self._checkpoint_manager.update_checkpoint(event.offset, event.sequence_number) + await partition_context.update_checkpoint(event.offset, event.sequence_number) - async def process_error(self, error): - logger.info("Event Processor Error for partition {}, {!r}".format(self._checkpoint_manager.partition_id, error)) + async def process_error(self, error, partition_context): + logger.info("Event Processor Error for partition {}, {!r}".format(partition_context.partition_id, error)) async def wait_and_close(host, duration): @@ -133,7 +133,7 @@ async def test_long_running_eph(live_eventhub): client, live_eventhub['consumer_group'], MyEventProcessor, - Sqlite3PartitionManager() + 
SamplePartitionManager() ) tasks = asyncio.gather( @@ -153,4 +153,4 @@ async def test_long_running_eph(live_eventhub): config['consumer_group'] = "$Default" config['partition'] = "0" loop = asyncio.get_event_loop() - loop.run_until_complete(test_long_running_eph(config)) \ No newline at end of file + loop.run_until_complete(test_long_running_eph(config)) diff --git a/sdk/eventhub/azure-eventhubs/tests/eventprocessor_tests/test_eventprocessor.py b/sdk/eventhub/azure-eventhubs/tests/eventprocessor_tests/test_eventprocessor.py new file mode 100644 index 000000000000..93cf137e1af5 --- /dev/null +++ b/sdk/eventhub/azure-eventhubs/tests/eventprocessor_tests/test_eventprocessor.py @@ -0,0 +1,311 @@ +#------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +#-------------------------------------------------------------------------- + +import pytest +import asyncio + +from azure.eventhub import EventData, EventHubError +from azure.eventhub.aio import EventHubClient +from azure.eventhub.aio.eventprocessor import EventProcessor, SamplePartitionManager, PartitionProcessor, \ + CloseReason, OwnershipLostError + + +class LoadBalancerPartitionProcessor(PartitionProcessor): + async def process_events(self, events, partition_context): + pass + +@pytest.mark.liveTest +@pytest.mark.asyncio +async def test_loadbalancer_balance(connstr_senders): + + connection_str, senders = connstr_senders + for sender in senders: + sender.send(EventData("EventProcessor Test")) + eventhub_client = EventHubClient.from_connection_string(connection_str, receive_timeout=3) + partition_manager = SamplePartitionManager() + + event_processor1 = EventProcessor(eventhub_client, "$default", LoadBalancerPartitionProcessor, + partition_manager, polling_interval=1) + asyncio.ensure_future(event_processor1.start()) + await asyncio.sleep(5) 
+ assert len(event_processor1._tasks) == 2 # event_processor1 claims two partitions + + event_processor2 = EventProcessor(eventhub_client, "$default", LoadBalancerPartitionProcessor, + partition_manager, polling_interval=1) + + asyncio.ensure_future(event_processor2.start()) + await asyncio.sleep(5) + assert len(event_processor1._tasks) == 1 # two event processors balance. So each has 1 task + assert len(event_processor2._tasks) == 1 + + event_processor3 = EventProcessor(eventhub_client, "$default", LoadBalancerPartitionProcessor, + partition_manager, polling_interval=1) + asyncio.ensure_future(event_processor3.start()) + await asyncio.sleep(5) + assert len(event_processor3._tasks) == 0 + await event_processor3.stop() + + await event_processor1.stop() + await asyncio.sleep(5) + assert len(event_processor2._tasks) == 2 # event_procesor2 takes another one after event_processor1 stops + await event_processor2.stop() + + +@pytest.mark.asyncio +async def test_load_balancer_abandon(): + class TestPartitionProcessor(PartitionProcessor): + async def process_events(self, events, partition_context): + await asyncio.sleep(0.1) + + class MockEventHubClient(object): + eh_name = "test_eh_name" + + def create_consumer(self, consumer_group_name, partition_id, event_position): + return MockEventhubConsumer() + + async def get_partition_ids(self): + return [str(pid) for pid in range(6)] + + class MockEventhubConsumer(object): + async def receive(self): + return [] + + partition_manager = SamplePartitionManager() + + event_processor = EventProcessor(MockEventHubClient(), "$default", TestPartitionProcessor, + partition_manager, polling_interval=0.5) + asyncio.get_running_loop().create_task(event_processor.start()) + await asyncio.sleep(5) + + ep_list = [] + for _ in range(2): + ep = EventProcessor(MockEventHubClient(), "$default", TestPartitionProcessor, + partition_manager, polling_interval=0.5) + asyncio.get_running_loop().create_task(ep.start()) + ep_list.append(ep) + await 
asyncio.sleep(5) + assert len(event_processor._tasks) == 2 + for ep in ep_list: + await ep.stop() + await event_processor.stop() + +@pytest.mark.liveTest +@pytest.mark.asyncio +async def test_loadbalancer_list_ownership_error(connstr_senders): + class ErrorPartitionManager(SamplePartitionManager): + async def list_ownership(self, eventhub_name, consumer_group_name): + raise RuntimeError("Test runtime error") + + connection_str, senders = connstr_senders + for sender in senders: + sender.send(EventData("EventProcessor Test")) + eventhub_client = EventHubClient.from_connection_string(connection_str, receive_timeout=3) + partition_manager = ErrorPartitionManager() + + event_processor = EventProcessor(eventhub_client, "$default", LoadBalancerPartitionProcessor, + partition_manager, polling_interval=1) + asyncio.ensure_future(event_processor.start()) + await asyncio.sleep(5) + assert event_processor._running is True + assert len(event_processor._tasks) == 0 + await event_processor.stop() + + +@pytest.mark.liveTest +@pytest.mark.asyncio +async def test_partition_processor(connstr_senders): + partition_processor1 = None + partition_processor2 = None + + class TestPartitionProcessor(PartitionProcessor): + def __init__(self): + self.initialize_called = False + self.error = None + self.close_reason = None + self.received_events = [] + self.checkpoint = None + + async def initialize(self, partition_context): + nonlocal partition_processor1, partition_processor2 + if partition_context.partition_id == "1": + partition_processor1 = self + else: + partition_processor2 = self + + async def process_events(self, events, partition_context): + self.received_events.extend(events) + if events: + offset, sn = events[-1].offset, events[-1].sequence_number + await partition_context.update_checkpoint(offset, sn) + self.checkpoint = (offset, sn) + + async def process_error(self, error, partition_context): + self.error = error + assert partition_context is not None + + async def close(self, 
reason, partition_context): + self.close_reason = reason + assert partition_context is not None + + connection_str, senders = connstr_senders + for sender in senders: + sender.send(EventData("EventProcessor Test")) + eventhub_client = EventHubClient.from_connection_string(connection_str, receive_timeout=3) + partition_manager = SamplePartitionManager() + + event_processor = EventProcessor(eventhub_client, "$default", TestPartitionProcessor, + partition_manager, polling_interval=1) + asyncio.ensure_future(event_processor.start()) + await asyncio.sleep(10) + await event_processor.stop() + assert partition_processor1 is not None and partition_processor2 is not None + assert len(partition_processor1.received_events) == 1 and len(partition_processor2.received_events) == 1 + assert partition_processor1.checkpoint is not None + assert partition_processor1.close_reason == CloseReason.SHUTDOWN + assert partition_processor1.error is None + + +@pytest.mark.liveTest +@pytest.mark.asyncio +async def test_partition_processor_process_events_error(connstr_senders): + class ErrorPartitionProcessor(PartitionProcessor): + async def process_events(self, events, partition_context): + if partition_context.partition_id == "1": + raise RuntimeError("processing events error") + else: + pass + + async def process_error(self, error, partition_context): + if partition_context.partition_id == "1": + assert isinstance(error, RuntimeError) + else: + raise RuntimeError("There shouldn't be an error for partition other than 1") + + async def close(self, reason, partition_context): + if partition_context.partition_id == "1": + assert reason == CloseReason.PROCESS_EVENTS_ERROR + else: + assert reason == CloseReason.SHUTDOWN + + connection_str, senders = connstr_senders + for sender in senders: + sender.send(EventData("EventProcessor Test")) + eventhub_client = EventHubClient.from_connection_string(connection_str, receive_timeout=3) + partition_manager = SamplePartitionManager() + + event_processor = 
EventProcessor(eventhub_client, "$default", ErrorPartitionProcessor, + partition_manager, polling_interval=1) + asyncio.ensure_future(event_processor.start()) + await asyncio.sleep(10) + await event_processor.stop() + + +@pytest.mark.asyncio +async def test_partition_processor_process_eventhub_consumer_error(): + class TestPartitionProcessor(PartitionProcessor): + async def process_events(self, events, partition_context): + pass + + async def process_error(self, error, partition_context): + assert isinstance(error, EventHubError) + + async def close(self, reason, partition_context): + assert reason == CloseReason.EVENTHUB_EXCEPTION + + class MockEventHubClient(object): + eh_name = "test_eh_name" + + def create_consumer(self, consumer_group_name, partition_id, event_position): + return MockEventhubConsumer() + + async def get_partition_ids(self): + return ["0", "1"] + + class MockEventhubConsumer(object): + async def receive(self): + raise EventHubError("Mock EventHubConsumer EventHubError") + + eventhub_client = MockEventHubClient() + partition_manager = SamplePartitionManager() + + event_processor = EventProcessor(eventhub_client, "$default", TestPartitionProcessor, + partition_manager, polling_interval=1) + asyncio.ensure_future(event_processor.start()) + await asyncio.sleep(5) + await event_processor.stop() + + +@pytest.mark.asyncio +async def test_partition_processor_process_error_close_error(): + class TestPartitionProcessor(PartitionProcessor): + async def process_events(self, events, partition_context): + raise RuntimeError("process_error") + + async def process_error(self, error, partition_context): + assert isinstance(error, RuntimeError) + raise RuntimeError("error from process_error") + + async def close(self, reason, partition_context): + assert reason == CloseReason.PROCESS_EVENTS_ERROR + raise RuntimeError("close error") + + class MockEventHubClient(object): + eh_name = "test_eh_name" + + def create_consumer(self, consumer_group_name, partition_id, 
event_position): + return MockEventhubConsumer() + + async def get_partition_ids(self): + return ["0", "1"] + + class MockEventhubConsumer(object): + async def receive(self): + return [EventData("mock events")] + + eventhub_client = MockEventHubClient() #EventHubClient.from_connection_string(connection_str, receive_timeout=3) + partition_manager = SamplePartitionManager() + + event_processor = EventProcessor(eventhub_client, "$default", TestPartitionProcessor, + partition_manager, polling_interval=1) + asyncio.ensure_future(event_processor.start()) + await asyncio.sleep(5) + await event_processor.stop() + + +@pytest.mark.liveTest +@pytest.mark.asyncio +async def test_partition_processor_process_update_checkpoint_error(connstr_senders): + class ErrorPartitionManager(SamplePartitionManager): + async def update_checkpoint(self, eventhub_name, consumer_group_name, partition_id, owner_id, + offset, sequence_number): + if partition_id == "1": + raise OwnershipLostError("Mocked ownership lost") + + class TestPartitionProcessor(PartitionProcessor): + async def process_events(self, events, partition_context): + if events: + await partition_context.update_checkpoint(events[-1].offset, events[-1].sequence_number) + + async def process_error(self, error, partition_context): + assert isinstance(error, OwnershipLostError) + + async def close(self, reason, partition_context): + if partition_context.partition_id == "1": + assert reason == CloseReason.OWNERSHIP_LOST + else: + assert reason == CloseReason.SHUTDOWN + + connection_str, senders = connstr_senders + for sender in senders: + sender.send(EventData("EventProcessor Test")) + eventhub_client = EventHubClient.from_connection_string(connection_str, receive_timeout=3) + partition_manager = ErrorPartitionManager() + + event_processor = EventProcessor(eventhub_client, "$default", TestPartitionProcessor, + partition_manager, polling_interval=1) + asyncio.ensure_future(event_processor.start()) + await asyncio.sleep(10) + await 
event_processor.stop() From 8e7e1c13c2430f8573894a6ad80feffc4b099842 Mon Sep 17 00:00:00 2001 From: yijxie Date: Fri, 6 Sep 2019 22:00:53 -0700 Subject: [PATCH 33/51] Fix a pylint error --- .../azure/eventhub/aio/eventprocessor/event_processor.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/sdk/eventhub/azure-eventhubs/azure/eventhub/aio/eventprocessor/event_processor.py b/sdk/eventhub/azure-eventhubs/azure/eventhub/aio/eventprocessor/event_processor.py index 37f9a20d67c5..77f534e9f362 100644 --- a/sdk/eventhub/azure-eventhubs/azure/eventhub/aio/eventprocessor/event_processor.py +++ b/sdk/eventhub/azure-eventhubs/azure/eventhub/aio/eventprocessor/event_processor.py @@ -138,7 +138,7 @@ async def start(self): while self._running: try: claimed_ownership_list = await ownership_manager.claim_ownership() - except Exception as err: + except Exception as err: # pylint:disable=broad-except log.warning("An exception (%r) occurred during balancing and claiming ownership for eventhub %r " "consumer group %r. 
Retrying after %r seconds", err, self._eventhub_name, self._consumer_group_name, self._polling_interval) From 13a8fe7b346febc1fb1fdc859d6204ff3c124e9c Mon Sep 17 00:00:00 2001 From: Yijun Xie <48257664+YijunXieMS@users.noreply.github.com> Date: Fri, 6 Sep 2019 23:34:53 -0700 Subject: [PATCH 34/51] Eventhubs blobstorage checkpointstore merge to preview3 (#7109) --- .../HISTORY.md | 0 .../LICENSE | 21 +++ .../MANIFEST.in | 2 + .../README.md | 0 .../azure/__init__.py | 5 + .../azure/eventhub/__init__.py | 5 + .../azure/eventhub/extensions/__init__.py | 5 + .../checkpointstoreblobaio/__init__.py | 12 ++ .../checkpointstoreblobaio/blobstoragepm.py | 123 ++++++++++++++++++ .../conftest.py | 0 .../dev_requirements.txt | 1 + .../event_processor_blob_storage_example.py | 42 ++++++ .../sdk_packaging.toml | 2 + .../setup.cfg | 2 + .../setup.py | 75 +++++++++++ 15 files changed, 295 insertions(+) create mode 100644 sdk/eventhub/azure-eventhubs-checkpointstoreblob-aio/HISTORY.md create mode 100644 sdk/eventhub/azure-eventhubs-checkpointstoreblob-aio/LICENSE create mode 100644 sdk/eventhub/azure-eventhubs-checkpointstoreblob-aio/MANIFEST.in create mode 100644 sdk/eventhub/azure-eventhubs-checkpointstoreblob-aio/README.md create mode 100644 sdk/eventhub/azure-eventhubs-checkpointstoreblob-aio/azure/__init__.py create mode 100644 sdk/eventhub/azure-eventhubs-checkpointstoreblob-aio/azure/eventhub/__init__.py create mode 100644 sdk/eventhub/azure-eventhubs-checkpointstoreblob-aio/azure/eventhub/extensions/__init__.py create mode 100644 sdk/eventhub/azure-eventhubs-checkpointstoreblob-aio/azure/eventhub/extensions/checkpointstoreblobaio/__init__.py create mode 100644 sdk/eventhub/azure-eventhubs-checkpointstoreblob-aio/azure/eventhub/extensions/checkpointstoreblobaio/blobstoragepm.py create mode 100644 sdk/eventhub/azure-eventhubs-checkpointstoreblob-aio/conftest.py create mode 100644 sdk/eventhub/azure-eventhubs-checkpointstoreblob-aio/dev_requirements.txt create mode 
100644 sdk/eventhub/azure-eventhubs-checkpointstoreblob-aio/examples/event_processor_blob_storage_example.py create mode 100644 sdk/eventhub/azure-eventhubs-checkpointstoreblob-aio/sdk_packaging.toml create mode 100644 sdk/eventhub/azure-eventhubs-checkpointstoreblob-aio/setup.cfg create mode 100644 sdk/eventhub/azure-eventhubs-checkpointstoreblob-aio/setup.py diff --git a/sdk/eventhub/azure-eventhubs-checkpointstoreblob-aio/HISTORY.md b/sdk/eventhub/azure-eventhubs-checkpointstoreblob-aio/HISTORY.md new file mode 100644 index 000000000000..e69de29bb2d1 diff --git a/sdk/eventhub/azure-eventhubs-checkpointstoreblob-aio/LICENSE b/sdk/eventhub/azure-eventhubs-checkpointstoreblob-aio/LICENSE new file mode 100644 index 000000000000..21071075c245 --- /dev/null +++ b/sdk/eventhub/azure-eventhubs-checkpointstoreblob-aio/LICENSE @@ -0,0 +1,21 @@ + MIT License + + Copyright (c) Microsoft Corporation. All rights reserved. + + Permission is hereby granted, free of charge, to any person obtaining a copy + of this software and associated documentation files (the "Software"), to deal + in the Software without restriction, including without limitation the rights + to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + copies of the Software, and to permit persons to whom the Software is + furnished to do so, subject to the following conditions: + + The above copyright notice and this permission notice shall be included in all + copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE + AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + SOFTWARE diff --git a/sdk/eventhub/azure-eventhubs-checkpointstoreblob-aio/MANIFEST.in b/sdk/eventhub/azure-eventhubs-checkpointstoreblob-aio/MANIFEST.in new file mode 100644 index 000000000000..50c61fef797b --- /dev/null +++ b/sdk/eventhub/azure-eventhubs-checkpointstoreblob-aio/MANIFEST.in @@ -0,0 +1,2 @@ +include *.md +include azure/__init__.py \ No newline at end of file diff --git a/sdk/eventhub/azure-eventhubs-checkpointstoreblob-aio/README.md b/sdk/eventhub/azure-eventhubs-checkpointstoreblob-aio/README.md new file mode 100644 index 000000000000..e69de29bb2d1 diff --git a/sdk/eventhub/azure-eventhubs-checkpointstoreblob-aio/azure/__init__.py b/sdk/eventhub/azure-eventhubs-checkpointstoreblob-aio/azure/__init__.py new file mode 100644 index 000000000000..62351a0ab30b --- /dev/null +++ b/sdk/eventhub/azure-eventhubs-checkpointstoreblob-aio/azure/__init__.py @@ -0,0 +1,5 @@ +# -------------------------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. 
+# -------------------------------------------------------------------------------------------- +__path__ = __import__('pkgutil').extend_path(__path__, __name__) # type: ignore diff --git a/sdk/eventhub/azure-eventhubs-checkpointstoreblob-aio/azure/eventhub/__init__.py b/sdk/eventhub/azure-eventhubs-checkpointstoreblob-aio/azure/eventhub/__init__.py new file mode 100644 index 000000000000..62351a0ab30b --- /dev/null +++ b/sdk/eventhub/azure-eventhubs-checkpointstoreblob-aio/azure/eventhub/__init__.py @@ -0,0 +1,5 @@ +# -------------------------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# -------------------------------------------------------------------------------------------- +__path__ = __import__('pkgutil').extend_path(__path__, __name__) # type: ignore diff --git a/sdk/eventhub/azure-eventhubs-checkpointstoreblob-aio/azure/eventhub/extensions/__init__.py b/sdk/eventhub/azure-eventhubs-checkpointstoreblob-aio/azure/eventhub/extensions/__init__.py new file mode 100644 index 000000000000..62351a0ab30b --- /dev/null +++ b/sdk/eventhub/azure-eventhubs-checkpointstoreblob-aio/azure/eventhub/extensions/__init__.py @@ -0,0 +1,5 @@ +# -------------------------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. 
+# -------------------------------------------------------------------------------------------- +__path__ = __import__('pkgutil').extend_path(__path__, __name__) # type: ignore diff --git a/sdk/eventhub/azure-eventhubs-checkpointstoreblob-aio/azure/eventhub/extensions/checkpointstoreblobaio/__init__.py b/sdk/eventhub/azure-eventhubs-checkpointstoreblob-aio/azure/eventhub/extensions/checkpointstoreblobaio/__init__.py new file mode 100644 index 000000000000..9e0e473c9a8b --- /dev/null +++ b/sdk/eventhub/azure-eventhubs-checkpointstoreblob-aio/azure/eventhub/extensions/checkpointstoreblobaio/__init__.py @@ -0,0 +1,12 @@ +# -------------------------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# -------------------------------------------------------------------------------------------- + +__version__ = "1.0.0b1" + +from .blobstoragepm import BlobPartitionManager + +__all__ = [ + "BlobPartitionManager", +] diff --git a/sdk/eventhub/azure-eventhubs-checkpointstoreblob-aio/azure/eventhub/extensions/checkpointstoreblobaio/blobstoragepm.py b/sdk/eventhub/azure-eventhubs-checkpointstoreblob-aio/azure/eventhub/extensions/checkpointstoreblobaio/blobstoragepm.py new file mode 100644 index 000000000000..85fcc1ca2eb5 --- /dev/null +++ b/sdk/eventhub/azure-eventhubs-checkpointstoreblob-aio/azure/eventhub/extensions/checkpointstoreblobaio/blobstoragepm.py @@ -0,0 +1,123 @@ +# -------------------------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. 
+# -------------------------------------------------------------------------------------------- +from typing import Iterable, Dict, Any +import logging +from collections import defaultdict +import asyncio +from azure.eventhub.aio.eventprocessor import PartitionManager, OwnershipLostError # type: ignore +from azure.core.exceptions import ResourceModifiedError, ResourceExistsError # type: ignore +from azure.storage.blob.aio import ContainerClient # type: ignore + +logger = logging.getLogger(__name__) +UPLOAD_DATA = "" + + +class BlobPartitionManager(PartitionManager): + """A PartitionManager that uses Azure Blob Storage to store the partition ownership and checkpoint data. + + This class implements methods list_ownership, claim_ownership, and update_checkpoint that are defined in class + azure.eventhub.eventprocessor.PartitionManager of package azure-eventhub. + + """ + def __init__(self, container_client: ContainerClient): + """ + + :param container_client: The Azure Blob Storage Container client. 
+ """ + self._container_client = container_client + self._cached_ownership_dict = defaultdict(dict) # type: Dict[str, Dict[str, Any]] + # lock each partition so the etag used by list_ownership, claim_ownership and update_checkpoint + # doesn't get out of sync when the three methods are running concurrently + self._cached_ownership_locks = defaultdict(asyncio.Lock) # type:Dict[str, asyncio.Lock] + + async def _upload_blob(self, ownership, metadata): + etag = ownership.get("etag") + if etag: + etag_match = {"if_match": etag} + else: + etag_match = {"if_none_match": '*'} + partition_id = ownership["partition_id"] + blob_client = await self._container_client.upload_blob( + name=partition_id, data=UPLOAD_DATA, overwrite=True, metadata=metadata, **etag_match + ) + uploaded_blob_properties = await blob_client.get_blob_properties() + ownership["etag"] = uploaded_blob_properties.etag + ownership["last_modified_time"] = uploaded_blob_properties.last_modified.timestamp() + + async def list_ownership(self, eventhub_name: str, consumer_group_name: str) -> Iterable[Dict[str, Any]]: + try: + blobs = self._container_client.list_blobs(include=['metadata']) + except Exception as err: # pylint:disable=broad-except + logger.warning("An exception occurred during list_ownership for eventhub %r consumer group %r. 
" + "Exception is %r", eventhub_name, consumer_group_name, err) + raise + async for b in blobs: + async with self._cached_ownership_locks[b.name]: + if b.name not in self._cached_ownership_dict \ + or b.last_modified.timestamp() >= self._cached_ownership_dict[b.name].get("last_modified_time"): + metadata = b.metadata + ownership = { + "eventhub_name": eventhub_name, + "consumer_group_name": consumer_group_name, + "partition_id": b.name, + "owner_id": metadata["owner_id"], + "etag": b.etag, + "last_modified_time": b.last_modified.timestamp() if b.last_modified else None + } + ownership.update(metadata) + self._cached_ownership_dict[b.name] = ownership + return self._cached_ownership_dict.values() + + async def claim_ownership(self, ownership_list: Iterable[Dict[str, Any]]) -> Iterable[Dict[str, Any]]: + result = [] + for ownership in ownership_list: + partition_id = ownership["partition_id"] + eventhub_name = ownership["eventhub_name"] + consumer_group_name = ownership["consumer_group_name"] + owner_id = ownership["owner_id"] + + async with self._cached_ownership_locks[partition_id]: + metadata = {"owner_id": ownership["owner_id"]} + if "offset" in ownership: + metadata["offset"] = ownership["offset"] + if "sequence_number" in ownership: + metadata["sequence_number"] = ownership["sequence_number"] + try: + await self._upload_blob(ownership, metadata) + self._cached_ownership_dict[partition_id] = ownership + result.append(ownership) + except (ResourceModifiedError, ResourceExistsError): + logger.info( + "EventProcessor instance %r of eventhub %r consumer group %r lost ownership to partition %r", + owner_id, eventhub_name, consumer_group_name, partition_id) + except Exception as err: # pylint:disable=broad-except + logger.warning("An exception occurred when EventProcessor instance %r claim_ownership for " + "eventhub %r consumer group %r partition %r. The ownership is now lost. 
Exception " + "is %r", owner_id, eventhub_name, consumer_group_name, partition_id, err) + return result + + async def update_checkpoint(self, eventhub_name, consumer_group_name, partition_id, owner_id, + offset, sequence_number) -> None: + metadata = { + "owner_id": owner_id, + "offset": offset, + "sequence_number": str(sequence_number) + } + cached_ownership = self._cached_ownership_dict[partition_id] + async with self._cached_ownership_locks[partition_id]: + try: + await self._upload_blob(cached_ownership, metadata) + except (ResourceModifiedError, ResourceExistsError): + logger.info( + "EventProcessor instance %r of eventhub %r consumer group %r couldn't update_checkpoint to " + "partition %r because the ownership has been stolen", + owner_id, eventhub_name, consumer_group_name, partition_id) + raise OwnershipLostError() + except Exception as err: + logger.warning( + "EventProcessor instance %r of eventhub %r consumer group %r couldn't update_checkpoint to " + "partition %r because of unexpected error. 
+ Exception is %r", + owner_id, eventhub_name, consumer_group_name, partition_id, err) + raise # EventProcessor will catch the exception and handle it diff --git a/sdk/eventhub/azure-eventhubs-checkpointstoreblob-aio/conftest.py b/sdk/eventhub/azure-eventhubs-checkpointstoreblob-aio/conftest.py new file mode 100644 index 000000000000..e69de29bb2d1 diff --git a/sdk/eventhub/azure-eventhubs-checkpointstoreblob-aio/dev_requirements.txt b/sdk/eventhub/azure-eventhubs-checkpointstoreblob-aio/dev_requirements.txt new file mode 100644 index 000000000000..092dbcdb7de7 --- /dev/null +++ b/sdk/eventhub/azure-eventhubs-checkpointstoreblob-aio/dev_requirements.txt @@ -0,0 +1 @@ +-e ../../eventhub/azure-eventhubs \ No newline at end of file diff --git a/sdk/eventhub/azure-eventhubs-checkpointstoreblob-aio/examples/event_processor_blob_storage_example.py b/sdk/eventhub/azure-eventhubs-checkpointstoreblob-aio/examples/event_processor_blob_storage_example.py new file mode 100644 index 000000000000..e7edc047831a --- /dev/null +++ b/sdk/eventhub/azure-eventhubs-checkpointstoreblob-aio/examples/event_processor_blob_storage_example.py @@ -0,0 +1,42 @@ +import asyncio +import logging +import os +from azure.eventhub.aio import EventHubClient +from azure.eventhub.aio.eventprocessor import EventProcessor, PartitionProcessor +from azure.eventhub.extensions.checkpointstoreblobaio import BlobPartitionManager +from azure.storage.blob.aio import ContainerClient + +RECEIVE_TIMEOUT = 5 # timeout in seconds for a receiving operation. 0 or None means no timeout +RETRY_TOTAL = 3 # max number of retries for receive operations within the receive timeout. Actual number of retries could be less if RECEIVE_TIMEOUT is too small +CONNECTION_STR = os.environ["EVENT_HUB_CONN_STR"] +STORAGE_CONNECTION_STR = os.environ["AZURE_STORAGE_CONN_STR"] + +logging.basicConfig(level=logging.INFO) + + +async def do_operation(event): + # do some sync or async operations. 
If the operation is i/o intensive, async will have better performance + print(event) + + +class MyPartitionProcessor(PartitionProcessor): + async def process_events(self, events, partition_context): + if events: + await asyncio.gather(*[do_operation(event) for event in events]) + await partition_context.update_checkpoint(events[-1].offset, events[-1].sequence_number) + else: + print("empty events received", "partition:", partition_context.partition_id) + + +if __name__ == '__main__': + loop = asyncio.get_event_loop() + client = EventHubClient.from_connection_string(CONNECTION_STR, receive_timeout=RECEIVE_TIMEOUT, retry_total=RETRY_TOTAL) + container_client = ContainerClient.from_connection_string(STORAGE_CONNECTION_STR, container="eventprocessor") + partition_manager = BlobPartitionManager(container_client=container_client) + event_processor = EventProcessor(client, "$default", MyPartitionProcessor, partition_manager, polling_interval=10) + try: + loop.run_until_complete(event_processor.start()) + except KeyboardInterrupt: + loop.run_until_complete(event_processor.stop()) + finally: + loop.stop() diff --git a/sdk/eventhub/azure-eventhubs-checkpointstoreblob-aio/sdk_packaging.toml b/sdk/eventhub/azure-eventhubs-checkpointstoreblob-aio/sdk_packaging.toml new file mode 100644 index 000000000000..e7687fdae93b --- /dev/null +++ b/sdk/eventhub/azure-eventhubs-checkpointstoreblob-aio/sdk_packaging.toml @@ -0,0 +1,2 @@ +[packaging] +auto_update = false \ No newline at end of file diff --git a/sdk/eventhub/azure-eventhubs-checkpointstoreblob-aio/setup.cfg b/sdk/eventhub/azure-eventhubs-checkpointstoreblob-aio/setup.cfg new file mode 100644 index 000000000000..3480374bc2f2 --- /dev/null +++ b/sdk/eventhub/azure-eventhubs-checkpointstoreblob-aio/setup.cfg @@ -0,0 +1,2 @@ +[bdist_wheel] +universal=1 \ No newline at end of file diff --git a/sdk/eventhub/azure-eventhubs-checkpointstoreblob-aio/setup.py b/sdk/eventhub/azure-eventhubs-checkpointstoreblob-aio/setup.py new file mode 
100644 index 000000000000..257854d88cb0 --- /dev/null +++ b/sdk/eventhub/azure-eventhubs-checkpointstoreblob-aio/setup.py @@ -0,0 +1,75 @@ +#!/usr/bin/env python + +#------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +#-------------------------------------------------------------------------- + +import re +import os.path +import sys +from io import open +from setuptools import find_packages, setup + +if sys.version_info < (3, 5, 3): + raise RuntimeError('Only python 3.5.3 or above is supported') + +# Change the PACKAGE_NAME only to change folder and different name +PACKAGE_NAME = "azure-eventhub-checkpointstoreblob-aio" +PACKAGE_PPRINT_NAME = "Event Hubs checkpointer implementation with Blob Storage" + +package_folder_path = "azure/eventhub/extensions/checkpointstoreblobaio" +namespace_name = "azure.eventhub.extensions.checkpointstoreblobaio" + +# Version extraction inspired from 'requests' +with open(os.path.join(package_folder_path, '__init__.py'), 'r') as fd: + version = re.search(r'^__version__\s*=\s*[\'"]([^\'"]*)[\'"]', + fd.read(), re.MULTILINE).group(1) + +if not version: + raise RuntimeError('Cannot find version information') + +with open('README.md') as f: + readme = f.read() +with open('HISTORY.md') as f: + history = f.read() + +exclude_packages = [ + 'tests', + 'examples', + # Exclude packages that will be covered by PEP420 or nspkg + 'azure', + 'azure.eventhub', + 'azure.eventhub.extensions', + ] + +setup( + name=PACKAGE_NAME, + version=version, + description='Microsoft Azure {} Client Library for Python'.format(PACKAGE_PPRINT_NAME), + long_description=readme + '\n\n' + history, + long_description_content_type='text/markdown', + license='MIT License', + author='Microsoft Corporation', + author_email='azpysdkhelp@microsoft.com', + 
url='https://github.com/Azure/azure-sdk-for-python/tree/master/sdk/eventhub/azure-eventhubs-checkpointstoreblob-aio', + classifiers=[ + 'Development Status :: 3 - Alpha', + 'Programming Language :: Python', + 'Programming Language :: Python :: 3', + 'Programming Language :: Python :: 3.5', + 'Programming Language :: Python :: 3.6', + 'Programming Language :: Python :: 3.7', + 'License :: OSI Approved :: MIT License', + ], + zip_safe=False, + packages=find_packages(exclude=exclude_packages), + install_requires=[ + 'azure-storage-blob<13.0.0,>=12.0.0b2', + 'azure-eventhub<6.0.0,>=5.0.0b2', + ], + extras_require={ + + } +) From b5c933f548161e33ce0fbb48320c3836594ab26c Mon Sep 17 00:00:00 2001 From: yijxie Date: Fri, 6 Sep 2019 23:57:24 -0700 Subject: [PATCH 35/51] exclude eventprocessor test for python27 --- sdk/eventhub/azure-eventhubs/conftest.py | 45 ++---------------------- 1 file changed, 2 insertions(+), 43 deletions(-) diff --git a/sdk/eventhub/azure-eventhubs/conftest.py b/sdk/eventhub/azure-eventhubs/conftest.py index c424357a77c4..ed0212e85562 --- a/sdk/eventhub/azure-eventhubs/conftest.py +++ b/sdk/eventhub/azure-eventhubs/conftest.py @@ -16,10 +16,11 @@ collect_ignore = [] if sys.version_info < (3, 5): collect_ignore.append("tests/asynctests") + collect_ignore.append("tests/eventprocessor_tests") collect_ignore.append("features") collect_ignore.append("examples/async_examples") -from azure.eventhub import EventHubClient, EventHubConsumer, EventPosition +from azure.eventhub import EventHubClient, EventPosition def pytest_addoption(parser): @@ -202,45 +203,3 @@ def connstr_senders(connection_str): yield connection_str, senders for s in senders: s.close() - - -@pytest.fixture() -def storage_clm(eph): - try: - container = str(uuid.uuid4()) - storage_clm = AzureStorageCheckpointLeaseManager( - os.environ['AZURE_STORAGE_ACCOUNT'], - os.environ['AZURE_STORAGE_ACCESS_KEY'], - container) - except KeyError: - pytest.skip("Live Storage configuration 
not found.") - try: - storage_clm.initialize(eph) - storage_clm.storage_client.create_container(container) - yield storage_clm - finally: - try: - storage_clm.storage_client.delete_container(container) - except: - warnings.warn(UserWarning("storage container teardown failed")) - -@pytest.fixture() -def eh_partition_pump(eph): - lease = AzureBlobLease() - lease.with_partition_id("1") - partition_pump = EventHubPartitionPump(eph, lease) - return partition_pump - - -@pytest.fixture() -def partition_pump(eph): - lease = Lease() - lease.with_partition_id("1") - partition_pump = PartitionPump(eph, lease) - return partition_pump - - -@pytest.fixture() -def partition_manager(eph): - partition_manager = PartitionManager(eph) - return partition_manager From 7b0f5fe4e34207fa0b4e88a14256ed01e193434e Mon Sep 17 00:00:00 2001 From: yijxie Date: Fri, 6 Sep 2019 23:58:23 -0700 Subject: [PATCH 36/51] exclude eventprocessor test --- .../tests/eventprocessor_tests/test_eventprocessor.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/sdk/eventhub/azure-eventhubs/tests/eventprocessor_tests/test_eventprocessor.py b/sdk/eventhub/azure-eventhubs/tests/eventprocessor_tests/test_eventprocessor.py index 93cf137e1af5..bdc6fa6b6d78 100644 --- a/sdk/eventhub/azure-eventhubs/tests/eventprocessor_tests/test_eventprocessor.py +++ b/sdk/eventhub/azure-eventhubs/tests/eventprocessor_tests/test_eventprocessor.py @@ -77,14 +77,14 @@ async def receive(self): event_processor = EventProcessor(MockEventHubClient(), "$default", TestPartitionProcessor, partition_manager, polling_interval=0.5) - asyncio.get_running_loop().create_task(event_processor.start()) + asyncio.ensure_future(event_processor.start()) await asyncio.sleep(5) ep_list = [] for _ in range(2): ep = EventProcessor(MockEventHubClient(), "$default", TestPartitionProcessor, partition_manager, polling_interval=0.5) - asyncio.get_running_loop().create_task(ep.start()) + asyncio.ensure_future(ep.start()) ep_list.append(ep) 
await asyncio.sleep(5) assert len(event_processor._tasks) == 2 From 167361e56f707110dbbadecda6f608b46e592bf4 Mon Sep 17 00:00:00 2001 From: yijxie Date: Sat, 7 Sep 2019 00:51:23 -0700 Subject: [PATCH 37/51] Revert "Eventhubs blobstorage checkpointstore merge to preview3 (#7109)" This reverts commit 13a8fe7b346febc1fb1fdc859d6204ff3c124e9c. --- .../HISTORY.md | 0 .../LICENSE | 21 --- .../MANIFEST.in | 2 - .../README.md | 0 .../azure/__init__.py | 5 - .../azure/eventhub/__init__.py | 5 - .../azure/eventhub/extensions/__init__.py | 5 - .../checkpointstoreblobaio/__init__.py | 12 -- .../checkpointstoreblobaio/blobstoragepm.py | 123 ------------------ .../conftest.py | 0 .../dev_requirements.txt | 1 - .../event_processor_blob_storage_example.py | 42 ------ .../sdk_packaging.toml | 2 - .../setup.cfg | 2 - .../setup.py | 75 ----------- 15 files changed, 295 deletions(-) delete mode 100644 sdk/eventhub/azure-eventhubs-checkpointstoreblob-aio/HISTORY.md delete mode 100644 sdk/eventhub/azure-eventhubs-checkpointstoreblob-aio/LICENSE delete mode 100644 sdk/eventhub/azure-eventhubs-checkpointstoreblob-aio/MANIFEST.in delete mode 100644 sdk/eventhub/azure-eventhubs-checkpointstoreblob-aio/README.md delete mode 100644 sdk/eventhub/azure-eventhubs-checkpointstoreblob-aio/azure/__init__.py delete mode 100644 sdk/eventhub/azure-eventhubs-checkpointstoreblob-aio/azure/eventhub/__init__.py delete mode 100644 sdk/eventhub/azure-eventhubs-checkpointstoreblob-aio/azure/eventhub/extensions/__init__.py delete mode 100644 sdk/eventhub/azure-eventhubs-checkpointstoreblob-aio/azure/eventhub/extensions/checkpointstoreblobaio/__init__.py delete mode 100644 sdk/eventhub/azure-eventhubs-checkpointstoreblob-aio/azure/eventhub/extensions/checkpointstoreblobaio/blobstoragepm.py delete mode 100644 sdk/eventhub/azure-eventhubs-checkpointstoreblob-aio/conftest.py delete mode 100644 sdk/eventhub/azure-eventhubs-checkpointstoreblob-aio/dev_requirements.txt delete mode 100644 
sdk/eventhub/azure-eventhubs-checkpointstoreblob-aio/examples/event_processor_blob_storage_example.py delete mode 100644 sdk/eventhub/azure-eventhubs-checkpointstoreblob-aio/sdk_packaging.toml delete mode 100644 sdk/eventhub/azure-eventhubs-checkpointstoreblob-aio/setup.cfg delete mode 100644 sdk/eventhub/azure-eventhubs-checkpointstoreblob-aio/setup.py diff --git a/sdk/eventhub/azure-eventhubs-checkpointstoreblob-aio/HISTORY.md b/sdk/eventhub/azure-eventhubs-checkpointstoreblob-aio/HISTORY.md deleted file mode 100644 index e69de29bb2d1..000000000000 diff --git a/sdk/eventhub/azure-eventhubs-checkpointstoreblob-aio/LICENSE b/sdk/eventhub/azure-eventhubs-checkpointstoreblob-aio/LICENSE deleted file mode 100644 index 21071075c245..000000000000 --- a/sdk/eventhub/azure-eventhubs-checkpointstoreblob-aio/LICENSE +++ /dev/null @@ -1,21 +0,0 @@ - MIT License - - Copyright (c) Microsoft Corporation. All rights reserved. - - Permission is hereby granted, free of charge, to any person obtaining a copy - of this software and associated documentation files (the "Software"), to deal - in the Software without restriction, including without limitation the rights - to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - copies of the Software, and to permit persons to whom the Software is - furnished to do so, subject to the following conditions: - - The above copyright notice and this permission notice shall be included in all - copies or substantial portions of the Software. - - THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE - AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - SOFTWARE diff --git a/sdk/eventhub/azure-eventhubs-checkpointstoreblob-aio/MANIFEST.in b/sdk/eventhub/azure-eventhubs-checkpointstoreblob-aio/MANIFEST.in deleted file mode 100644 index 50c61fef797b..000000000000 --- a/sdk/eventhub/azure-eventhubs-checkpointstoreblob-aio/MANIFEST.in +++ /dev/null @@ -1,2 +0,0 @@ -include *.md -include azure/__init__.py \ No newline at end of file diff --git a/sdk/eventhub/azure-eventhubs-checkpointstoreblob-aio/README.md b/sdk/eventhub/azure-eventhubs-checkpointstoreblob-aio/README.md deleted file mode 100644 index e69de29bb2d1..000000000000 diff --git a/sdk/eventhub/azure-eventhubs-checkpointstoreblob-aio/azure/__init__.py b/sdk/eventhub/azure-eventhubs-checkpointstoreblob-aio/azure/__init__.py deleted file mode 100644 index 62351a0ab30b..000000000000 --- a/sdk/eventhub/azure-eventhubs-checkpointstoreblob-aio/azure/__init__.py +++ /dev/null @@ -1,5 +0,0 @@ -# -------------------------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for license information. 
-# -------------------------------------------------------------------------------------------- -__path__ = __import__('pkgutil').extend_path(__path__, __name__) # type: ignore diff --git a/sdk/eventhub/azure-eventhubs-checkpointstoreblob-aio/azure/eventhub/__init__.py b/sdk/eventhub/azure-eventhubs-checkpointstoreblob-aio/azure/eventhub/__init__.py deleted file mode 100644 index 62351a0ab30b..000000000000 --- a/sdk/eventhub/azure-eventhubs-checkpointstoreblob-aio/azure/eventhub/__init__.py +++ /dev/null @@ -1,5 +0,0 @@ -# -------------------------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for license information. -# -------------------------------------------------------------------------------------------- -__path__ = __import__('pkgutil').extend_path(__path__, __name__) # type: ignore diff --git a/sdk/eventhub/azure-eventhubs-checkpointstoreblob-aio/azure/eventhub/extensions/__init__.py b/sdk/eventhub/azure-eventhubs-checkpointstoreblob-aio/azure/eventhub/extensions/__init__.py deleted file mode 100644 index 62351a0ab30b..000000000000 --- a/sdk/eventhub/azure-eventhubs-checkpointstoreblob-aio/azure/eventhub/extensions/__init__.py +++ /dev/null @@ -1,5 +0,0 @@ -# -------------------------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for license information. 
-# -------------------------------------------------------------------------------------------- -__path__ = __import__('pkgutil').extend_path(__path__, __name__) # type: ignore diff --git a/sdk/eventhub/azure-eventhubs-checkpointstoreblob-aio/azure/eventhub/extensions/checkpointstoreblobaio/__init__.py b/sdk/eventhub/azure-eventhubs-checkpointstoreblob-aio/azure/eventhub/extensions/checkpointstoreblobaio/__init__.py deleted file mode 100644 index 9e0e473c9a8b..000000000000 --- a/sdk/eventhub/azure-eventhubs-checkpointstoreblob-aio/azure/eventhub/extensions/checkpointstoreblobaio/__init__.py +++ /dev/null @@ -1,12 +0,0 @@ -# -------------------------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for license information. -# -------------------------------------------------------------------------------------------- - -__version__ = "1.0.0b1" - -from .blobstoragepm import BlobPartitionManager - -__all__ = [ - "BlobPartitionManager", -] diff --git a/sdk/eventhub/azure-eventhubs-checkpointstoreblob-aio/azure/eventhub/extensions/checkpointstoreblobaio/blobstoragepm.py b/sdk/eventhub/azure-eventhubs-checkpointstoreblob-aio/azure/eventhub/extensions/checkpointstoreblobaio/blobstoragepm.py deleted file mode 100644 index 85fcc1ca2eb5..000000000000 --- a/sdk/eventhub/azure-eventhubs-checkpointstoreblob-aio/azure/eventhub/extensions/checkpointstoreblobaio/blobstoragepm.py +++ /dev/null @@ -1,123 +0,0 @@ -# -------------------------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for license information. 
-# -------------------------------------------------------------------------------------------- -from typing import Iterable, Dict, Any -import logging -from collections import defaultdict -import asyncio -from azure.eventhub.aio.eventprocessor import PartitionManager, OwnershipLostError # type: ignore -from azure.core.exceptions import ResourceModifiedError, ResourceExistsError # type: ignore -from azure.storage.blob.aio import ContainerClient # type: ignore - -logger = logging.getLogger(__name__) -UPLOAD_DATA = "" - - -class BlobPartitionManager(PartitionManager): - """An PartitionManager that uses Azure Blob Storage to store the partition ownership and checkpoint data. - - This class implements methods list_ownership, claim_ownership, and update_checkpoint that are defined in class - azure.eventhub.eventprocessor.PartitionManager of package azure-eventhub. - - """ - def __init__(self, container_client: ContainerClient): - """ - - :param container_client: The Azure Blob Storage Container client. 
- """ - self._container_client = container_client - self._cached_ownership_dict = defaultdict(dict) # type: Dict[str, Dict[str, Any]] - # lock each partition for list_ownership, claim_ownership and update_checkpoint etag doesn't get out of sync - # when the three methods are running concurrently - self._cached_ownership_locks = defaultdict(asyncio.Lock) # type:Dict[str, asyncio.Lock] - - async def _upload_blob(self, ownership, metadata): - etag = ownership.get("etag") - if etag: - etag_match = {"if_match": etag} - else: - etag_match = {"if_none_match": '*'} - partition_id = ownership["partition_id"] - blob_client = await self._container_client.upload_blob( - name=partition_id, data=UPLOAD_DATA, overwrite=True, metadata=metadata, **etag_match - ) - uploaded_blob_properties = await blob_client.get_blob_properties() - ownership["etag"] = uploaded_blob_properties.etag - ownership["last_modified_time"] = uploaded_blob_properties.last_modified.timestamp() - - async def list_ownership(self, eventhub_name: str, consumer_group_name: str) -> Iterable[Dict[str, Any]]: - try: - blobs = self._container_client.list_blobs(include=['metadata']) - except Exception as err: # pylint:disable=broad-except - logger.warning("An exception occurred during list_ownership for eventhub %r consumer group %r. 
" - "Exception is %r", eventhub_name, consumer_group_name, err) - raise - async for b in blobs: - async with self._cached_ownership_locks[b.name]: - if b.name not in self._cached_ownership_dict \ - or b.last_modified.timestamp() >= self._cached_ownership_dict[b.name].get("last_modified_time"): - metadata = b.metadata - ownership = { - "eventhub_name": eventhub_name, - "consumer_group_name": consumer_group_name, - "partition_id": b.name, - "owner_id": metadata["owner_id"], - "etag": b.etag, - "last_modified_time": b.last_modified.timestamp() if b.last_modified else None - } - ownership.update(metadata) - self._cached_ownership_dict[b.name] = ownership - return self._cached_ownership_dict.values() - - async def claim_ownership(self, ownership_list: Iterable[Dict[str, Any]]) -> Iterable[Dict[str, Any]]: - result = [] - for ownership in ownership_list: - partition_id = ownership["partition_id"] - eventhub_name = ownership["eventhub_name"] - consumer_group_name = ownership["consumer_group_name"] - owner_id = ownership["owner_id"] - - async with self._cached_ownership_locks[partition_id]: - metadata = {"owner_id": ownership["owner_id"]} - if "offset" in ownership: - metadata["offset"] = ownership["offset"] - if "sequence_number" in ownership: - metadata["sequence_number"] = ownership["sequence_number"] - try: - await self._upload_blob(ownership, metadata) - self._cached_ownership_dict[partition_id] = ownership - result.append(ownership) - except (ResourceModifiedError, ResourceExistsError): - logger.info( - "EventProcessor instance %r of eventhub %r consumer group %r lost ownership to partition %r", - owner_id, eventhub_name, consumer_group_name, partition_id) - except Exception as err: # pylint:disable=broad-except - logger.warning("An exception occurred when EventProcessor instance %r claim_ownership for " - "eventhub %r consumer group %r partition %r. The ownership is now lost. 
Exception " - "is %r", owner_id, eventhub_name, consumer_group_name, partition_id, err) - return result - - async def update_checkpoint(self, eventhub_name, consumer_group_name, partition_id, owner_id, - offset, sequence_number) -> None: - metadata = { - "owner_id": owner_id, - "offset": offset, - "sequence_number": str(sequence_number) - } - cached_ownership = self._cached_ownership_dict[partition_id] - async with self._cached_ownership_locks[partition_id]: - try: - await self._upload_blob(cached_ownership, metadata) - except (ResourceModifiedError, ResourceExistsError): - logger.info( - "EventProcessor instance %r of eventhub %r consumer group %r couldn't update_checkpoint to " - "partition %r because the ownership has been stolen", - owner_id, eventhub_name, consumer_group_name, partition_id) - raise OwnershipLostError() - except Exception as err: - logger.warning( - "EventProcessor instance %r of eventhub %r consumer group %r couldn't update_checkpoint to " - "partition %r because of unexpected error. 
Exception is %r", - owner_id, eventhub_name, consumer_group_name, partition_id, err) - raise # EventProcessor will catch the exception and handle it diff --git a/sdk/eventhub/azure-eventhubs-checkpointstoreblob-aio/conftest.py b/sdk/eventhub/azure-eventhubs-checkpointstoreblob-aio/conftest.py deleted file mode 100644 index e69de29bb2d1..000000000000 diff --git a/sdk/eventhub/azure-eventhubs-checkpointstoreblob-aio/dev_requirements.txt b/sdk/eventhub/azure-eventhubs-checkpointstoreblob-aio/dev_requirements.txt deleted file mode 100644 index 092dbcdb7de7..000000000000 --- a/sdk/eventhub/azure-eventhubs-checkpointstoreblob-aio/dev_requirements.txt +++ /dev/null @@ -1 +0,0 @@ --e ../../eventhub/azure-eventhubs \ No newline at end of file diff --git a/sdk/eventhub/azure-eventhubs-checkpointstoreblob-aio/examples/event_processor_blob_storage_example.py b/sdk/eventhub/azure-eventhubs-checkpointstoreblob-aio/examples/event_processor_blob_storage_example.py deleted file mode 100644 index e7edc047831a..000000000000 --- a/sdk/eventhub/azure-eventhubs-checkpointstoreblob-aio/examples/event_processor_blob_storage_example.py +++ /dev/null @@ -1,42 +0,0 @@ -import asyncio -import logging -import os -from azure.eventhub.aio import EventHubClient -from azure.eventhub.aio.eventprocessor import EventProcessor, PartitionProcessor -from azure.eventhub.extensions.checkpointstoreblobaio import BlobPartitionManager -from azure.storage.blob.aio import ContainerClient - -RECEIVE_TIMEOUT = 5 # timeout in seconds for a receiving operation. 0 or None means no timeout -RETRY_TOTAL = 3 # max number of retries for receive operations within the receive timeout. Actual number of retries clould be less if RECEIVE_TIMEOUT is too small -CONNECTION_STR = os.environ["EVENT_HUB_CONN_STR"] -STORAGE_CONNECTION_STR = os.environ["AZURE_STORAGE_CONN_STR"] - -logging.basicConfig(level=logging.INFO) - - -async def do_operation(event): - # do some sync or async operations. 
If the operation is i/o intensive, async will have better performance - print(event) - - -class MyPartitionProcessor(PartitionProcessor): - async def process_events(self, events, partition_context): - if events: - await asyncio.gather(*[do_operation(event) for event in events]) - await partition_context.update_checkpoint(events[-1].offset, events[-1].sequence_number) - else: - print("empty events received", "partition:", partition_context.partition_id) - - -if __name__ == '__main__': - loop = asyncio.get_event_loop() - client = EventHubClient.from_connection_string(CONNECTION_STR, receive_timeout=RECEIVE_TIMEOUT, retry_total=RETRY_TOTAL) - container_client = ContainerClient.from_connection_string(STORAGE_CONNECTION_STR, container="eventprocessor") - partition_manager = BlobPartitionManager(container_client=container_client) - event_processor = EventProcessor(client, "$default", MyPartitionProcessor, partition_manager, polling_interval=10) - try: - loop.run_until_complete(event_processor.start()) - except KeyboardInterrupt: - loop.run_until_complete(event_processor.stop()) - finally: - loop.stop() diff --git a/sdk/eventhub/azure-eventhubs-checkpointstoreblob-aio/sdk_packaging.toml b/sdk/eventhub/azure-eventhubs-checkpointstoreblob-aio/sdk_packaging.toml deleted file mode 100644 index e7687fdae93b..000000000000 --- a/sdk/eventhub/azure-eventhubs-checkpointstoreblob-aio/sdk_packaging.toml +++ /dev/null @@ -1,2 +0,0 @@ -[packaging] -auto_update = false \ No newline at end of file diff --git a/sdk/eventhub/azure-eventhubs-checkpointstoreblob-aio/setup.cfg b/sdk/eventhub/azure-eventhubs-checkpointstoreblob-aio/setup.cfg deleted file mode 100644 index 3480374bc2f2..000000000000 --- a/sdk/eventhub/azure-eventhubs-checkpointstoreblob-aio/setup.cfg +++ /dev/null @@ -1,2 +0,0 @@ -[bdist_wheel] -universal=1 \ No newline at end of file diff --git a/sdk/eventhub/azure-eventhubs-checkpointstoreblob-aio/setup.py b/sdk/eventhub/azure-eventhubs-checkpointstoreblob-aio/setup.py 
deleted file mode 100644 index 257854d88cb0..000000000000 --- a/sdk/eventhub/azure-eventhubs-checkpointstoreblob-aio/setup.py +++ /dev/null @@ -1,75 +0,0 @@ -#!/usr/bin/env python - -#------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -#-------------------------------------------------------------------------- - -import re -import os.path -import sys -from io import open -from setuptools import find_packages, setup - -if sys.version_info < (3, 5, 3): - raise RuntimeError('Only python 3.5.3 or above is supported') - -# Change the PACKAGE_NAME only to change folder and different name -PACKAGE_NAME = "azure-eventhub-checkpointstoreblob-aio" -PACKAGE_PPRINT_NAME = "Event Hubs checkpointer implementation with Blob Storage" - -package_folder_path = "azure/eventhub/extensions/checkpointstoreblobaio" -namespace_name = "azure.eventhub.extensions.checkpointstoreblobaio" - -# Version extraction inspired from 'requests' -with open(os.path.join(package_folder_path, '__init__.py'), 'r') as fd: - version = re.search(r'^__version__\s*=\s*[\'"]([^\'"]*)[\'"]', - fd.read(), re.MULTILINE).group(1) - -if not version: - raise RuntimeError('Cannot find version information') - -with open('README.md') as f: - readme = f.read() -with open('HISTORY.md') as f: - history = f.read() - -exclude_packages = [ - 'tests', - 'examples', - # Exclude packages that will be covered by PEP420 or nspkg - 'azure', - 'azure.eventhub', - 'azure.eventhub.extensions', - ] - -setup( - name=PACKAGE_NAME, - version=version, - description='Microsoft Azure {} Client Library for Python'.format(PACKAGE_PPRINT_NAME), - long_description=readme + '\n\n' + history, - long_description_content_type='text/markdown', - license='MIT License', - author='Microsoft Corporation', - author_email='azpysdkhelp@microsoft.com', - 
url='https://github.com/Azure/azure-sdk-for-python/tree/master/sdk/eventhub/azure-eventhubs-checkpointerblob-aio', - classifiers=[ - 'Development Status :: 3 - Alpha', - 'Programming Language :: Python', - 'Programming Language :: Python :: 3', - 'Programming Language :: Python :: 3.5', - 'Programming Language :: Python :: 3.6', - 'Programming Language :: Python :: 3.7', - 'License :: OSI Approved :: MIT License', - ], - zip_safe=False, - packages=find_packages(exclude=exclude_packages), - install_requires=[ - 'azure-storage-blob<13.0.0,>=12.0.0b2', - 'azure-eventhub<6.0.0,>=5.0.0b2', - ], - extras_require={ - - } -) From 1253983dea24f93a8580195aed2f1fdf430b9093 Mon Sep 17 00:00:00 2001 From: "Adam Ling (MSFT)" <47871814+yunhaoling@users.noreply.github.com> Date: Sat, 7 Sep 2019 16:42:33 -0700 Subject: [PATCH 38/51] Fix small problem in consumer iterator (#7110) --- .../azure-eventhubs/azure/eventhub/aio/consumer_async.py | 3 +++ sdk/eventhub/azure-eventhubs/azure/eventhub/consumer.py | 3 +++ 2 files changed, 6 insertions(+) diff --git a/sdk/eventhub/azure-eventhubs/azure/eventhub/aio/consumer_async.py b/sdk/eventhub/azure-eventhubs/azure/eventhub/aio/consumer_async.py index d3651a1c9d8f..3aa1a7d6bbe5 100644 --- a/sdk/eventhub/azure-eventhubs/azure/eventhub/aio/consumer_async.py +++ b/sdk/eventhub/azure-eventhubs/azure/eventhub/aio/consumer_async.py @@ -93,6 +93,7 @@ def __aiter__(self): async def __anext__(self): retried_times = 0 + last_exception = None while retried_times < self._client._config.max_retries: # pylint:disable=protected-access try: await self._open() @@ -108,6 +109,8 @@ async def __anext__(self): await self._client._try_delay(retried_times=retried_times, last_exception=last_exception, # pylint:disable=protected-access entity_name=self._name) retried_times += 1 + log.info("%r operation has exhausted retry. 
Last exception: %r.", self._name, last_exception) + raise last_exception def _create_handler(self): alt_creds = { diff --git a/sdk/eventhub/azure-eventhubs/azure/eventhub/consumer.py b/sdk/eventhub/azure-eventhubs/azure/eventhub/consumer.py index 0e89ba9bc55f..499c3ba5429e 100644 --- a/sdk/eventhub/azure-eventhubs/azure/eventhub/consumer.py +++ b/sdk/eventhub/azure-eventhubs/azure/eventhub/consumer.py @@ -89,6 +89,7 @@ def __iter__(self): def __next__(self): retried_times = 0 + last_exception = None while retried_times < self._client._config.max_retries: # pylint:disable=protected-access try: self._open() @@ -104,6 +105,8 @@ def __next__(self): self._client._try_delay(retried_times=retried_times, last_exception=last_exception, # pylint:disable=protected-access entity_name=self._name) retried_times += 1 + log.info("%r operation has exhausted retry. Last exception: %r.", self._name, last_exception) + raise last_exception def _create_handler(self): alt_creds = { From 548a989fc87bf8a0530297e95503177cfe3d425e Mon Sep 17 00:00:00 2001 From: yijxie Date: Sat, 7 Sep 2019 22:46:13 -0700 Subject: [PATCH 39/51] Fixed an issue that initializes partition processor multiple times --- .../azure/eventhub/aio/eventprocessor/event_processor.py | 9 ++++++++- .../tests/eventprocessor_tests/test_eventprocessor.py | 7 +++++-- 2 files changed, 13 insertions(+), 3 deletions(-) diff --git a/sdk/eventhub/azure-eventhubs/azure/eventhub/aio/eventprocessor/event_processor.py b/sdk/eventhub/azure-eventhubs/azure/eventhub/aio/eventprocessor/event_processor.py index 77f534e9f362..85f6f1983250 100644 --- a/sdk/eventhub/azure-eventhubs/azure/eventhub/aio/eventprocessor/event_processor.py +++ b/sdk/eventhub/azure-eventhubs/azure/eventhub/aio/eventprocessor/event_processor.py @@ -236,9 +236,16 @@ async def close(reason): ) try: + try: + await partition_processor.initialize(partition_context) + except Exception as err: # pylint:disable=broad-except + log.warning( + "PartitionProcessor of 
EventProcessor instance %r of eventhub %r partition %r consumer group %r" + " has an error during running initialize(). The exception is %r.", + owner_id, eventhub_name, partition_id, consumer_group_name, err + ) while True: try: - await partition_processor.initialize(partition_context) events = await partition_consumer.receive() await partition_processor.process_events(events, partition_context) except asyncio.CancelledError: diff --git a/sdk/eventhub/azure-eventhubs/tests/eventprocessor_tests/test_eventprocessor.py b/sdk/eventhub/azure-eventhubs/tests/eventprocessor_tests/test_eventprocessor.py index bdc6fa6b6d78..28ff7cd6554b 100644 --- a/sdk/eventhub/azure-eventhubs/tests/eventprocessor_tests/test_eventprocessor.py +++ b/sdk/eventhub/azure-eventhubs/tests/eventprocessor_tests/test_eventprocessor.py @@ -241,12 +241,15 @@ async def receive(self): @pytest.mark.asyncio async def test_partition_processor_process_error_close_error(): class TestPartitionProcessor(PartitionProcessor): + async def initialize(self, partition_context): + raise RuntimeError("initialize error") + async def process_events(self, events, partition_context): - raise RuntimeError("process_error") + raise RuntimeError("process_events error") async def process_error(self, error, partition_context): assert isinstance(error, RuntimeError) - raise RuntimeError("error from process_error") + raise RuntimeError("process_error error") async def close(self, reason, partition_context): assert reason == CloseReason.PROCESS_EVENTS_ERROR From 725b3331a560ce6977cd855874793a50391d6246 Mon Sep 17 00:00:00 2001 From: yijxie Date: Sun, 8 Sep 2019 21:19:49 -0700 Subject: [PATCH 40/51] Update release history for 5.0.0b3 --- sdk/eventhub/azure-eventhubs/HISTORY.md | 14 ++++++++++++++ 1 file changed, 14 insertions(+) diff --git a/sdk/eventhub/azure-eventhubs/HISTORY.md b/sdk/eventhub/azure-eventhubs/HISTORY.md index b51e636b4628..34e9d718e7f1 100644 --- a/sdk/eventhub/azure-eventhubs/HISTORY.md +++ 
b/sdk/eventhub/azure-eventhubs/HISTORY.md @@ -1,4 +1,18 @@ # Release History +## 5.0.0b3 (2019-09-10) + +**New features** +- `EventProcessor` has a load balancer that balances load among multiple EventProcessors automatically +- In addition to `SamplePartitionManager`, A new `PartitionManager` implementation that uses Azure Blob Storage is added +to centrally store the checkpoint data for event processors. It's not packaged separately as a plug-in to this package. +Refer to [Azure Blob Storage Partition Manager](https://github.com/Azure/azure-sdk-for-python/tree/master/sdk/eventhub/azure-eventhubs-checkpointstoreblob-aio) for details. + +**Breaking changes** + +- `PartitionProcessor` constructor removed argument "checkpoint_manager". Its methods (initialize, process_events, +process_error, close) added argument "partition_context", which has method update_checkpoint. +- `CheckpointManager` was replaced by `PartitionContext` +- Renamed `Sqlite3PartitionManager` to `SamplePartitionManager` ## 5.0.0b2 (2019-08-06) From c359042406874efa35a8be900e4764a4c8531129 Mon Sep 17 00:00:00 2001 From: yijxie Date: Sun, 8 Sep 2019 21:20:21 -0700 Subject: [PATCH 41/51] Update README for 5.0.0b3 --- sdk/eventhub/azure-eventhubs/README.md | 22 +++++++++------------- 1 file changed, 9 insertions(+), 13 deletions(-) diff --git a/sdk/eventhub/azure-eventhubs/README.md b/sdk/eventhub/azure-eventhubs/README.md index 621c7ebe8179..94280c2df912 100644 --- a/sdk/eventhub/azure-eventhubs/README.md +++ b/sdk/eventhub/azure-eventhubs/README.md @@ -217,13 +217,16 @@ Using an `EventHubConsumer` to consume events like in the previous examples puts The `EventProcessor` will delegate the processing of events to a `PartitionProcessor` that you provide, allowing you to focus on business logic while the processor holds responsibility for managing the underlying consumer operations including checkpointing and load balancing. 
-While load balancing is a feature we will be adding in the next update, you can see how to use the `EventProcessor` in the below example, where we use an in memory `PartitionManager` that does checkpointing in memory. +You can see how to use the `EventProcessor` in the below example, where we use an in memory `PartitionManager` that does checkpointing in memory. + +[Azure Blob Storage Partition Manager](https://github.com/Azure/azure-sdk-for-python/tree/master/sdk/eventhub/azure-eventhubs-checkpointstoreblob-aio) is another `PartitionManager` implementation that allows multiple EventProcessors to share the load balancing and checkpoint data in a central storage. + ```python import asyncio from azure.eventhub.aio import EventHubClient -from azure.eventhub.eventprocessor import EventProcessor, PartitionProcessor, Sqlite3PartitionManager +from azure.eventhub.aio.eventprocessor import EventProcessor, PartitionProcessor, SamplePartitionManager connection_str = '<< CONNECTION STRING FOR THE EVENT HUBS NAMESPACE >>' @@ -232,24 +235,16 @@ async def do_operation(event): print(event) class MyPartitionProcessor(PartitionProcessor): - def __init__(self, checkpoint_manager): - super(MyPartitionProcessor, self).__init__(checkpoint_manager) - - async def process_events(self, events): + async def process_events(self, events, partition_context): if events: await asyncio.gather(*[do_operation(event) for event in events]) - await self._checkpoint_manager.update_checkpoint(events[-1].offset, events[-1].sequence_number) - -def partition_processor_factory(checkpoint_manager): - return MyPartitionProcessor(checkpoint_manager) + await partition_context.update_checkpoint(events[-1].offset, events[-1].sequence_number) async def main(): client = EventHubClient.from_connection_string(connection_str, receive_timeout=5, retry_total=3) - partition_manager = Sqlite3PartitionManager() # in-memory PartitionManager + partition_manager = SamplePartitionManager() # in-memory 
PartitionManager. try: event_processor = EventProcessor(client, "$default", MyPartitionProcessor, partition_manager) - # You can also define a callable object for creating PartitionProcessor like below: - # event_processor = EventProcessor(client, "$default", partition_processor_factory, partition_manager) asyncio.ensure_future(event_processor.start()) await asyncio.sleep(60) await event_processor.stop() @@ -273,6 +268,7 @@ The Event Hubs APIs generate the following exceptions. - **EventDataError:** The EventData to be sent fails data validation. For instance, this error is raised if you try to send an EventData that is already sent. - **EventDataSendError:** The Eventhubs service responds with an error when an EventData is sent. +- **OperationTimeoutError:** EventHubConsumer.send() times out. - **EventHubError:** All other Eventhubs related errors. It is also the root error class of all the above mentioned errors. ## Next steps From 1e98a2b488f3a83c2cc1cb5404d4b9dcc4149069 Mon Sep 17 00:00:00 2001 From: yijxie Date: Mon, 9 Sep 2019 01:03:11 -0700 Subject: [PATCH 42/51] Fix an issue --- .../azure/eventhub/aio/eventprocessor/event_processor.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/sdk/eventhub/azure-eventhubs/azure/eventhub/aio/eventprocessor/event_processor.py b/sdk/eventhub/azure-eventhubs/azure/eventhub/aio/eventprocessor/event_processor.py index 85f6f1983250..efff887d9f27 100644 --- a/sdk/eventhub/azure-eventhubs/azure/eventhub/aio/eventprocessor/event_processor.py +++ b/sdk/eventhub/azure-eventhubs/azure/eventhub/aio/eventprocessor/event_processor.py @@ -145,12 +145,12 @@ async def start(self): await asyncio.sleep(self._polling_interval) continue - to_cancel_list = self._tasks.keys() if claimed_ownership_list: claimed_partition_ids = [x["partition_id"] for x in claimed_ownership_list] to_cancel_list = self._tasks.keys() - claimed_partition_ids self._create_tasks_for_claimed_ownership(claimed_ownership_list) else: + to_cancel_list = 
set(self._tasks.keys()) log.info("EventProcessor %r hasn't claimed an ownership. It keeps claiming.", self._id) if to_cancel_list: self._cancel_tasks_for_partitions(to_cancel_list) From c408d7c4e1e22ce412c052849ceeefd4ca00e0cf Mon Sep 17 00:00:00 2001 From: yijxie Date: Mon, 9 Sep 2019 13:23:56 -0700 Subject: [PATCH 43/51] fix a small issue --- .../azure/eventhub/aio/eventprocessor/event_processor.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/sdk/eventhub/azure-eventhubs/azure/eventhub/aio/eventprocessor/event_processor.py b/sdk/eventhub/azure-eventhubs/azure/eventhub/aio/eventprocessor/event_processor.py index efff887d9f27..4356a7c1b74b 100644 --- a/sdk/eventhub/azure-eventhubs/azure/eventhub/aio/eventprocessor/event_processor.py +++ b/sdk/eventhub/azure-eventhubs/azure/eventhub/aio/eventprocessor/event_processor.py @@ -182,7 +182,7 @@ def _cancel_tasks_for_partitions(self, to_cancel_partitions): def _create_tasks_for_claimed_ownership(self, to_claim_ownership_list): for ownership in to_claim_ownership_list: partition_id = ownership["partition_id"] - if partition_id not in self._tasks: + if partition_id not in self._tasks or self._tasks[partition_id].done(): self._tasks[partition_id] = get_running_loop().create_task(self._receive(ownership)) async def _receive(self, ownership): From 48de36cfd2b8d1cc5b55698628f465c88b1af5a7 Mon Sep 17 00:00:00 2001 From: yijxie Date: Mon, 9 Sep 2019 13:24:32 -0700 Subject: [PATCH 44/51] Fix a small issue --- sdk/eventhub/azure-eventhubs/HISTORY.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/sdk/eventhub/azure-eventhubs/HISTORY.md b/sdk/eventhub/azure-eventhubs/HISTORY.md index 34e9d718e7f1..a2dcdb1e5b55 100644 --- a/sdk/eventhub/azure-eventhubs/HISTORY.md +++ b/sdk/eventhub/azure-eventhubs/HISTORY.md @@ -3,8 +3,8 @@ **New features** - `EventProcessor` has a load balancer that balances load among multiple EventProcessors automatically -- In addition to `SamplePartitionManager`, A new 
`PartitionManager` implementation that uses Azure Blob Storage is added -to centrally store the checkpoint data for event processors. It's not packaged separately as a plug-in to this package. +- A new `PartitionManager` implementation that uses Azure Blob Storage is added +to centrally store the checkpoint data for event processors. It's packaged separately as a plug-in to this package. Refer to [Azure Blob Storage Partition Manager](https://github.com/Azure/azure-sdk-for-python/tree/master/sdk/eventhub/azure-eventhubs-checkpointstoreblob-aio) for details. **Breaking changes** From 8563cb8d1be0a335e8402f9c083994885e971ee6 Mon Sep 17 00:00:00 2001 From: "Adam Ling (MSFT)" <47871814+yunhaoling@users.noreply.github.com> Date: Mon, 9 Sep 2019 16:30:03 -0700 Subject: [PATCH 45/51] Updates for docs, tests (#7111) * Small fix * Update history and readme * Update docstring of eventhub * Small fix --- sdk/eventhub/azure-eventhubs/HISTORY.md | 22 ++++++++++++------- sdk/eventhub/azure-eventhubs/README.md | 6 +---- .../azure/eventhub/aio/consumer_async.py | 1 + .../aio/eventprocessor/event_processor.py | 2 +- .../azure/eventhub/aio/producer_async.py | 1 + .../azure/eventhub/consumer.py | 1 + .../azure/eventhub/producer.py | 1 + .../tests/test_longrunning_send.py | 6 ++--- 8 files changed, 23 insertions(+), 17 deletions(-) diff --git a/sdk/eventhub/azure-eventhubs/HISTORY.md b/sdk/eventhub/azure-eventhubs/HISTORY.md index a2dcdb1e5b55..456b0fb18e98 100644 --- a/sdk/eventhub/azure-eventhubs/HISTORY.md +++ b/sdk/eventhub/azure-eventhubs/HISTORY.md @@ -1,18 +1,24 @@ # Release History + ## 5.0.0b3 (2019-09-10) **New features** -- `EventProcessor` has a load balancer that balances load among multiple EventProcessors automatically -- A new `PartitionManager` implementation that uses Azure Blob Storage is added -to centrally store the checkpoint data for event processors. It's packaged separately as a plug-in to this package. 
-Refer to [Azure Blob Storage Partition Manager](https://github.com/Azure/azure-sdk-for-python/tree/master/sdk/eventhub/azure-eventhubs-checkpointstoreblob-aio) for details. + +- Added support for automatic load balancing among multiple `EventProcessor`. +- Added `BlobPartitionManager` which implements `PartitionManager`. + - Azure Blob Storage is applied for storing data used by `EventProcessor`. + - Packaged separately as a plug-in to `EventProcessor`. + - For details, please refer to [Azure Blob Storage Partition Manager](https://github.com/Azure/azure-sdk-for-python/tree/master/sdk/eventhub/azure-eventhubs-checkpointstoreblob-aio). +- Added property `system_properties` on `EventData`. **Breaking changes** -- `PartitionProcessor` constructor removed argument "checkpoint_manager". Its methods (initialize, process_events, -process_error, close) added argument "partition_context", which has method update_checkpoint. -- `CheckpointManager` was replaced by `PartitionContext` -- Renamed `Sqlite3PartitionManager` to `SamplePartitionManager` +- Removed constructor method of `PartitionProcessor`. For initialization please implement the method `initialize`. +- Replaced `CheckpointManager` by `PartitionContext`. + - `PartitionContext` has partition context information and method `update_checkpoint`. +- Updated all methods of `PartitionProcessor` to include `PartitionContext` as part of the arguments. +- Updated accessibility of class members in `EventHub/EventHubConsumer/EventHubProducer`to be private. 
+ ## 5.0.0b2 (2019-08-06) diff --git a/sdk/eventhub/azure-eventhubs/README.md b/sdk/eventhub/azure-eventhubs/README.md index 94280c2df912..f3f29afd49fe 100644 --- a/sdk/eventhub/azure-eventhubs/README.md +++ b/sdk/eventhub/azure-eventhubs/README.md @@ -9,7 +9,7 @@ The Azure Event Hubs client library allows for publishing and consuming of Azure - Observe interesting operations and interactions happening within your business or other ecosystem, allowing loosely coupled systems to interact without the need to bind them together. - Receive events from one or more publishers, transform them to better meet the needs of your ecosystem, then publish the transformed events to a new stream for consumers to observe. -[Source code](https://github.com/Azure/azure-sdk-for-python/tree/master/sdk/eventhub/azure-eventhubs) | [Package (PyPi)](https://pypi.org/project/azure-eventhub/) | [API reference documentation](https://azure.github.io/azure-sdk-for-python/ref/azure.eventhub) | [Product documentation](https://docs.microsoft.com/en-ca/azure/event-hubs/) +[Source code](https://github.com/Azure/azure-sdk-for-python/tree/master/sdk/eventhub/azure-eventhubs) | [Package (PyPi)](https://pypi.org/project/azure-eventhub/) | [API reference documentation](https://azure.github.io/azure-sdk-for-python/ref/azure.eventhub) | [Product documentation](https://docs.microsoft.com/en-us/azure/event-hubs/) ## Getting started @@ -20,10 +20,6 @@ Install the Azure Event Hubs client library for Python with pip: ``` $ pip install --pre azure-eventhub ``` -For Python2.7, please install package "typing". This is a workaround for [issue 6767](https://github.com/Azure/azure-sdk-for-python/issues/6767). 
-``` -$ pip install typing -``` **Prerequisites** diff --git a/sdk/eventhub/azure-eventhubs/azure/eventhub/aio/consumer_async.py b/sdk/eventhub/azure-eventhubs/azure/eventhub/aio/consumer_async.py index 3aa1a7d6bbe5..61da41316cb3 100644 --- a/sdk/eventhub/azure-eventhubs/azure/eventhub/aio/consumer_async.py +++ b/sdk/eventhub/azure-eventhubs/azure/eventhub/aio/consumer_async.py @@ -31,6 +31,7 @@ class EventHubConsumer(ConsumerProducerMixin): # pylint:disable=too-many-instan group to be actively reading events from the partition. These non-exclusive consumers are sometimes referred to as "Non-Epoch Consumers." + Please use the method `create_consumer` on `EventHubClient` for creating `EventHubConsumer`. """ _timeout = 0 _epoch_symbol = b'com.microsoft:epoch' diff --git a/sdk/eventhub/azure-eventhubs/azure/eventhub/aio/eventprocessor/event_processor.py b/sdk/eventhub/azure-eventhubs/azure/eventhub/aio/eventprocessor/event_processor.py index 4356a7c1b74b..0b8ca10f20ba 100644 --- a/sdk/eventhub/azure-eventhubs/azure/eventhub/aio/eventprocessor/event_processor.py +++ b/sdk/eventhub/azure-eventhubs/azure/eventhub/aio/eventprocessor/event_processor.py @@ -23,7 +23,7 @@ class EventProcessor(object): # pylint:disable=too-many-instance-attributes """ - An EventProcessor constantly receives events from all partitions of the Event Hub in the context of a given + An EventProcessor constantly receives events from multiple partitions of the Event Hub in the context of a given consumer group. The received data will be sent to PartitionProcessor to be processed. It provides the user a convenient way to receive events from multiple partitions and save checkpoints. 
diff --git a/sdk/eventhub/azure-eventhubs/azure/eventhub/aio/producer_async.py b/sdk/eventhub/azure-eventhubs/azure/eventhub/aio/producer_async.py index 999bdc09c787..ec4e39c87116 100644 --- a/sdk/eventhub/azure-eventhubs/azure/eventhub/aio/producer_async.py +++ b/sdk/eventhub/azure-eventhubs/azure/eventhub/aio/producer_async.py @@ -26,6 +26,7 @@ class EventHubProducer(ConsumerProducerMixin): # pylint: disable=too-many-insta be created to allow event data to be automatically routed to an available partition or specific to a partition. + Please use the method `create_producer` on `EventHubClient` for creating `EventHubProducer`. """ _timeout_symbol = b'com.microsoft:timeout' diff --git a/sdk/eventhub/azure-eventhubs/azure/eventhub/consumer.py b/sdk/eventhub/azure-eventhubs/azure/eventhub/consumer.py index 499c3ba5429e..3ba14020aaf7 100644 --- a/sdk/eventhub/azure-eventhubs/azure/eventhub/consumer.py +++ b/sdk/eventhub/azure-eventhubs/azure/eventhub/consumer.py @@ -33,6 +33,7 @@ class EventHubConsumer(ConsumerProducerMixin): # pylint:disable=too-many-instan group to be actively reading events from the partition. These non-exclusive consumers are sometimes referred to as "Non-Epoch Consumers." + Please use the method `create_consumer` on `EventHubClient` for creating `EventHubConsumer`. """ _timeout = 0 _epoch_symbol = b'com.microsoft:epoch' diff --git a/sdk/eventhub/azure-eventhubs/azure/eventhub/producer.py b/sdk/eventhub/azure-eventhubs/azure/eventhub/producer.py index 8008fac7ecd0..cab9638f2acc 100644 --- a/sdk/eventhub/azure-eventhubs/azure/eventhub/producer.py +++ b/sdk/eventhub/azure-eventhubs/azure/eventhub/producer.py @@ -39,6 +39,7 @@ class EventHubProducer(ConsumerProducerMixin): # pylint:disable=too-many-instan be created to allow event data to be automatically routed to an available partition or specific to a partition. + Please use the method `create_producer` on `EventHubClient` for creating `EventHubProducer`. 
""" _timeout_symbol = b'com.microsoft:timeout' diff --git a/sdk/eventhub/azure-eventhubs/tests/test_longrunning_send.py b/sdk/eventhub/azure-eventhubs/tests/test_longrunning_send.py index e737ee6889d7..93e7e85b4287 100644 --- a/sdk/eventhub/azure-eventhubs/tests/test_longrunning_send.py +++ b/sdk/eventhub/azure-eventhubs/tests/test_longrunning_send.py @@ -61,12 +61,12 @@ def send(sender, args): total += 1 except ValueError: sender.send(batch, timeout=0) - print("Sent total {} of partition {}".format(total, sender.partition)) + print("Sent total {} of partition {}".format(total, sender._partition)) batch = sender.create_batch() except Exception as err: - print("Partition {} send failed {}".format(sender.partition, err)) + print("Partition {} send failed {}".format(sender._partition, err)) raise - print("Sent total {} of partition {}".format(total, sender.partition)) + print("Sent total {} of partition {}".format(total, sender._partition)) @pytest.mark.liveTest From 6aed4193509a5af420c5de03142908689984e794 Mon Sep 17 00:00:00 2001 From: "Adam Ling (MSFT)" <47871814+yunhaoling@users.noreply.github.com> Date: Mon, 9 Sep 2019 17:39:47 -0700 Subject: [PATCH 46/51] Update history (#7158) --- sdk/eventhub/azure-eventhubs/HISTORY.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/sdk/eventhub/azure-eventhubs/HISTORY.md b/sdk/eventhub/azure-eventhubs/HISTORY.md index 456b0fb18e98..c5af555047bc 100644 --- a/sdk/eventhub/azure-eventhubs/HISTORY.md +++ b/sdk/eventhub/azure-eventhubs/HISTORY.md @@ -18,7 +18,7 @@ - `PartitionContext` has partition context information and method `update_checkpoint`. - Updated all methods of `PartitionProcessor` to include `PartitionContext` as part of the arguments. - Updated accessibility of class members in `EventHub/EventHubConsumer/EventHubProducer`to be private. - +- Moved `azure.eventhub.eventprocessor` under `aio` package, which now becomes `azure.eventhub.aio.eventprocessor`. 
## 5.0.0b2 (2019-08-06) From 18aa083b2ee412b028c9d1641cb0aa9501672aca Mon Sep 17 00:00:00 2001 From: yijxie Date: Mon, 9 Sep 2019 18:07:33 -0700 Subject: [PATCH 47/51] add pkgutil --- sdk/eventhub/azure-eventhubs/azure/eventhub/__init__.py | 1 + 1 file changed, 1 insertion(+) diff --git a/sdk/eventhub/azure-eventhubs/azure/eventhub/__init__.py b/sdk/eventhub/azure-eventhubs/azure/eventhub/__init__.py index dfc198f71fa8..62b2a6b811d8 100644 --- a/sdk/eventhub/azure-eventhubs/azure/eventhub/__init__.py +++ b/sdk/eventhub/azure-eventhubs/azure/eventhub/__init__.py @@ -3,6 +3,7 @@ # Licensed under the MIT License. See License.txt in the project root for license information. # -------------------------------------------------------------------------------------------- +__path__ = __import__('pkgutil').extend_path(__path__, __name__) # type: ignore __version__ = "5.0.0b3" from uamqp import constants # type: ignore from azure.eventhub.common import EventData, EventDataBatch, EventPosition From f577a1a508278194a7472a243d3a3d55845a0e5a Mon Sep 17 00:00:00 2001 From: yijxie Date: Mon, 9 Sep 2019 23:08:42 -0700 Subject: [PATCH 48/51] Update docstring --- .../azure/eventhub/aio/client_async.py | 4 +-- .../azure/eventhub/aio/consumer_async.py | 4 +-- .../aio/eventprocessor/event_processor.py | 30 +++++++++---------- .../aio/eventprocessor/partition_manager.py | 6 ++-- .../aio/eventprocessor/partition_processor.py | 6 ++-- .../azure-eventhubs/azure/eventhub/client.py | 4 +-- .../azure/eventhub/client_abstract.py | 2 +- .../azure-eventhubs/azure/eventhub/common.py | 21 +++++++++---- .../azure/eventhub/consumer.py | 4 +-- 9 files changed, 45 insertions(+), 36 deletions(-) diff --git a/sdk/eventhub/azure-eventhubs/azure/eventhub/aio/client_async.py b/sdk/eventhub/azure-eventhubs/azure/eventhub/aio/client_async.py index 67f6ab52dd30..88b693d157ec 100644 --- a/sdk/eventhub/azure-eventhubs/azure/eventhub/aio/client_async.py +++ 
b/sdk/eventhub/azure-eventhubs/azure/eventhub/aio/client_async.py @@ -166,7 +166,7 @@ async def get_properties(self): -'partition_ids' :rtype: dict - :raises: ~azure.eventhub.ConnectError + :raises: ~azure.eventhub.EventHubError """ if self._is_iothub and not self._iothub_redirect_info: await self._iothub_redirect() @@ -207,7 +207,7 @@ async def get_partition_properties(self, partition): :param partition: The target partition id. :type partition: str :rtype: dict - :raises: ~azure.eventhub.ConnectError + :raises: ~azure.eventhub.EventHubError """ if self._is_iothub and not self._iothub_redirect_info: await self._iothub_redirect() diff --git a/sdk/eventhub/azure-eventhubs/azure/eventhub/aio/consumer_async.py b/sdk/eventhub/azure-eventhubs/azure/eventhub/aio/consumer_async.py index 61da41316cb3..efad6a3cb7db 100644 --- a/sdk/eventhub/azure-eventhubs/azure/eventhub/aio/consumer_async.py +++ b/sdk/eventhub/azure-eventhubs/azure/eventhub/aio/consumer_async.py @@ -52,8 +52,8 @@ def __init__( # pylint: disable=super-init-not-called :param prefetch: The number of events to prefetch from the service for processing. Default is 300. :type prefetch: int - :param owner_level: The priority of the exclusive consumer. It will an exclusive - consumer if owner_level is set. + :param owner_level: The priority of the exclusive consumer. An exclusive + consumer will be created if owner_level is set. :type owner_level: int :param loop: An event loop. 
""" diff --git a/sdk/eventhub/azure-eventhubs/azure/eventhub/aio/eventprocessor/event_processor.py b/sdk/eventhub/azure-eventhubs/azure/eventhub/aio/eventprocessor/event_processor.py index 0b8ca10f20ba..ec65d0453d5c 100644 --- a/sdk/eventhub/azure-eventhubs/azure/eventhub/aio/eventprocessor/event_processor.py +++ b/sdk/eventhub/azure-eventhubs/azure/eventhub/aio/eventprocessor/event_processor.py @@ -90,11 +90,13 @@ def __init__( :type consumer_group_name: str :param partition_processor_type: A subclass type of ~azure.eventhub.eventprocessor.PartitionProcessor. :type partition_processor_type: type - :param partition_manager: Interacts with the storage system, dealing with ownership and checkpoints. - For an easy start, SamplePartitionManager comes with the package. - :type partition_manager: Class implementing the ~azure.eventhub.eventprocessor.PartitionManager. + :param partition_manager: Interacts with the data storage that stores ownership and checkpoints data. + ~azure.eventhub.aio.eventprocessor.SamplePartitionManager can be used to save data in memory or to a file. + More sophisticated partition managers are / will be provided as plug-ins. Users can also develop their own + partition managers. + :type partition_manager: Subclass of ~azure.eventhub.eventprocessor.PartitionManager. :param initial_event_position: The event position to start a partition consumer. - if the partition has no checkpoint yet. This will be replaced by "reset" checkpoint in the near future. + if the partition has no checkpoint yet. This could be replaced by "reset" checkpoint in the near future. :type initial_event_position: EventPosition :param polling_interval: The interval between any two pollings of balancing and claiming :type polling_interval: float @@ -117,15 +119,10 @@ def __repr__(self): return 'EventProcessor: id {}'.format(self._id) async def start(self): - """Start the EventProcessor. + """Start the EventProcessor - 1. 
Calls the OwnershipManager to keep claiming and balancing ownership of partitions in an - infinitely loop until self.stop() is called. - 2. Cancels tasks for partitions that are no longer owned by this EventProcessor - 3. Creates tasks for partitions that are newly claimed by this EventProcessor - 4. Keeps tasks running for partitions that haven't changed ownership - 5. Each task repeatedly calls EvenHubConsumer.receive() to retrieve events and - call user defined partition processor + This EventProcessor will then start to balance partition ownership with other EventProcessors + and asynchronously start to receive EventData from EventHub and process events. :return: None @@ -158,10 +155,13 @@ async def start(self): await asyncio.sleep(self._polling_interval) async def stop(self): - """Stop claiming ownership and all the partition consumers owned by this EventProcessor + """Stop the EventProcessor - This method stops claiming ownership of owned partitions and cancels tasks that are running - EventHubConsumer.receive() for the partitions owned by this EventProcessor. + This EventProcessor will stop receiving events from EventHubs and release the ownership of the partitions + it is working on. + If other EventProcessors are still working, they will take over these partitions. + + A stopped EventProcessor can be restarted by calling method start() again. :return: None diff --git a/sdk/eventhub/azure-eventhubs/azure/eventhub/aio/eventprocessor/partition_manager.py b/sdk/eventhub/azure-eventhubs/azure/eventhub/aio/eventprocessor/partition_manager.py index 4bb84779dd53..bb55e00d52c5 100644 --- a/sdk/eventhub/azure-eventhubs/azure/eventhub/aio/eventprocessor/partition_manager.py +++ b/sdk/eventhub/azure-eventhubs/azure/eventhub/aio/eventprocessor/partition_manager.py @@ -10,7 +10,7 @@ class PartitionManager(ABC): """ PartitionManager deals with the interaction with the chosen storage service. - It's able to list/claim ownership and create checkpoint. 
+ It's able to list/claim ownership and save checkpoint. """ @abstractmethod @@ -76,11 +76,11 @@ async def update_checkpoint(self, eventhub_name, consumer_group_name, partition_ will be associated with. :type sequence_number: int :return: None - :raise: `OwnershipLostError`, `CheckpointError` + :raise: `OwnershipLostError` """ class OwnershipLostError(Exception): - """Raises when update_checkpoint detects the ownership has been lost + """Raises when update_checkpoint detects the ownership to a partition has been lost """ diff --git a/sdk/eventhub/azure-eventhubs/azure/eventhub/aio/eventprocessor/partition_processor.py b/sdk/eventhub/azure-eventhubs/azure/eventhub/aio/eventprocessor/partition_processor.py index 8b0fb2ca7e5c..9481c032eb86 100644 --- a/sdk/eventhub/azure-eventhubs/azure/eventhub/aio/eventprocessor/partition_processor.py +++ b/sdk/eventhub/azure-eventhubs/azure/eventhub/aio/eventprocessor/partition_processor.py @@ -21,12 +21,12 @@ class PartitionProcessor(ABC): """ PartitionProcessor processes events received from the Azure Event Hubs service. A single instance of a class implementing this abstract class will be created for every partition the associated - ~azure.eventhub.eventprocessor.EventProcessor owns. + ~azure.eventhub.aio.eventprocessor.EventProcessor owns. """ async def initialize(self, partition_context: PartitionContext): - """ + """Called when EventProcessor creates this PartitionProcessor. :param partition_context: The context information of this partition. :type partition_context: ~azure.eventhub.aio.eventprocessor.PartitionContext @@ -59,7 +59,7 @@ async def process_events(self, events: List[EventData], partition_context: Parti """ async def process_error(self, error, partition_context: PartitionContext): - """Called when an error happens + """Called when an error happens when receiving or processing events :param error: The error that happens. 
:type error: Exception diff --git a/sdk/eventhub/azure-eventhubs/azure/eventhub/client.py b/sdk/eventhub/azure-eventhubs/azure/eventhub/client.py index 90a1ac86742f..06d264b5b9ac 100644 --- a/sdk/eventhub/azure-eventhubs/azure/eventhub/client.py +++ b/sdk/eventhub/azure-eventhubs/azure/eventhub/client.py @@ -168,7 +168,7 @@ def get_properties(self): -'partition_ids' :rtype: dict - :raises: ~azure.eventhub.ConnectError + :raises: ~azure.eventhub.EventHubError """ if self._is_iothub and not self._iothub_redirect_info: self._iothub_redirect() @@ -188,7 +188,7 @@ def get_partition_ids(self): Get partition ids of the specified EventHub. :rtype: list[str] - :raises: ~azure.eventhub.ConnectError + :raises: ~azure.eventhub.EventHubError """ return self.get_properties()['partition_ids'] diff --git a/sdk/eventhub/azure-eventhubs/azure/eventhub/client_abstract.py b/sdk/eventhub/azure-eventhubs/azure/eventhub/client_abstract.py index 7d4c8cd2712e..c6879730266c 100644 --- a/sdk/eventhub/azure-eventhubs/azure/eventhub/client_abstract.py +++ b/sdk/eventhub/azure-eventhubs/azure/eventhub/client_abstract.py @@ -258,7 +258,7 @@ def from_connection_string(cls, conn_str, **kwargs): will return as soon as service returns no new events. Default value is the same as prefetch. :type max_batch_size: int :param receive_timeout: The timeout in seconds to receive a batch of events from an Event Hub. - Default value is 0 seconds. + Default value is 0 seconds, meaning there is no timeout. :type receive_timeout: float :param send_timeout: The timeout in seconds for an individual event to be sent from the time that it is queued. Default value is 60 seconds. If set to 0, there will be no timeout. 
diff --git a/sdk/eventhub/azure-eventhubs/azure/eventhub/common.py b/sdk/eventhub/azure-eventhubs/azure/eventhub/common.py index 73fed892db11..7a25fbdd7bac 100644 --- a/sdk/eventhub/azure-eventhubs/azure/eventhub/common.py +++ b/sdk/eventhub/azure-eventhubs/azure/eventhub/common.py @@ -39,7 +39,6 @@ def parse_sas_token(sas_token): class EventData(object): """ The EventData class is a holder of event content. - Acts as a wrapper to an uamqp.message.Message object. Example: .. literalinclude:: ../examples/test_examples_eventhub.py @@ -262,9 +261,18 @@ def encode_message(self): class EventDataBatch(object): """ - The EventDataBatch class is a holder of a batch of event data within max size bytes. - Use ~azure.eventhub.Producer.create_batch method to create an EventDataBatch object. - Do not instantiate an EventDataBatch object directly. + It's much faster to send EventData in a batch than individually. But putting too much EventData in one batch + may exceed the frame size limit of the event hub. + EventDataBatch helps you build the maximum allowed size batch of EventData to improve performance + within the size limit + + Use create_batch method of ~azure.eventhub.EventHubProducer or ~azure.eventhub.aio.EventHubProducer + to create an EventDataBatch object. It retrieves the frame size limit from the service. + Use method EventDataBatch.try_add to build the list until a ValueError is raised, + and use send method of ~azure.eventhub.EventHubProducer or ~azure.eventhub.aio.EventHubProducer + to send out the EventData batch to EventHub + + Do not instantiate an EventDataBatch object using its constructor. """ def __init__(self, max_size=None, partition_key=None): @@ -307,8 +315,9 @@ def _set_partition_key(self, value): def try_add(self, event_data): """ The message size is a sum up of body, properties, header, etc. - :param event_data: - :return: + :param event_data: ~azure.eventhub.EventData + :return: None + :raise: ValueError, when exceeding the size limit. 
""" if event_data is None: log.warning("event_data is None when calling EventDataBatch.try_add. Ignored") diff --git a/sdk/eventhub/azure-eventhubs/azure/eventhub/consumer.py b/sdk/eventhub/azure-eventhubs/azure/eventhub/consumer.py index 3ba14020aaf7..604d9c7d7b82 100644 --- a/sdk/eventhub/azure-eventhubs/azure/eventhub/consumer.py +++ b/sdk/eventhub/azure-eventhubs/azure/eventhub/consumer.py @@ -51,8 +51,8 @@ def __init__(self, client, source, **kwargs): :param prefetch: The number of events to prefetch from the service for processing. Default is 300. :type prefetch: int - :param owner_level: The priority of the exclusive consumer. It will an exclusive - consumer if owner_level is set. + :param owner_level: The priority of the exclusive consumer. An exclusive + consumer will be created if owner_level is set. :type owner_level: int """ event_position = kwargs.get("event_position", None) From 6aca2cd40ac7b0170e555e91968b213057a66243 Mon Sep 17 00:00:00 2001 From: yijxie Date: Mon, 9 Sep 2019 23:09:08 -0700 Subject: [PATCH 49/51] Small README update --- sdk/eventhub/azure-eventhubs/README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/sdk/eventhub/azure-eventhubs/README.md b/sdk/eventhub/azure-eventhubs/README.md index f3f29afd49fe..e3b99aad051d 100644 --- a/sdk/eventhub/azure-eventhubs/README.md +++ b/sdk/eventhub/azure-eventhubs/README.md @@ -238,7 +238,7 @@ class MyPartitionProcessor(PartitionProcessor): async def main(): client = EventHubClient.from_connection_string(connection_str, receive_timeout=5, retry_total=3) - partition_manager = SamplePartitionManager() # in-memory PartitionManager. 
+ partition_manager = SamplePartitionManager() # in-memory or file based PartitionManager try: event_processor = EventProcessor(client, "$default", MyPartitionProcessor, partition_manager) asyncio.ensure_future(event_processor.start()) From 40bb7c881881113a156cf710ee5b28c5f8021a49 Mon Sep 17 00:00:00 2001 From: "Adam Ling (MSFT)" <47871814+yunhaoling@users.noreply.github.com> Date: Tue, 10 Sep 2019 00:45:17 -0700 Subject: [PATCH 50/51] Update docstring (#7166) --- sdk/eventhub/azure-eventhubs/README.md | 67 ++++++++++++++++++- .../aio/eventprocessor/event_processor.py | 20 +++--- .../aio/eventprocessor/partition_processor.py | 10 ++- .../azure-eventhubs/azure/eventhub/common.py | 20 +++--- 4 files changed, 92 insertions(+), 25 deletions(-) diff --git a/sdk/eventhub/azure-eventhubs/README.md b/sdk/eventhub/azure-eventhubs/README.md index e3b99aad051d..6dc04539f0b8 100644 --- a/sdk/eventhub/azure-eventhubs/README.md +++ b/sdk/eventhub/azure-eventhubs/README.md @@ -109,6 +109,8 @@ partition_ids = client.get_partition_ids() Publish events to an Event Hub. +#### Send a single event or an array of events + ```python from azure.eventhub import EventHubClient, EventData @@ -130,6 +132,34 @@ finally: pass ``` +#### Send a batch of events + +Use the `create_batch` method on `EventHubProducer` to create an `EventDataBatch` object which can then be sent using the `send` method. Events may be added to the `EventDataBatch` using the `try_add` method until the maximum batch size limit in bytes has been reached. 
+```python +from azure.eventhub import EventHubClient, EventData + +try: + connection_str = '<< CONNECTION STRING FOR THE EVENT HUBS NAMESPACE >>' + event_hub_path = '<< NAME OF THE EVENT HUB >>' + client = EventHubClient.from_connection_string(connection_str, event_hub_path) + producer = client.create_producer(partition_id="0") + + event_data_batch = producer.create_batch(max_size=10000) + can_add = True + while can_add: + try: + event_data_batch.try_add(EventData('Message inside EventBatchData')) + except ValueError: + can_add = False # EventDataBatch object reaches max_size. + + with producer: + producer.send(event_data_batch) +except: + raise +finally: + pass +``` + ### Consume events from an Event Hub Consume events from an Event Hub. @@ -159,6 +189,7 @@ finally: Publish events to an Event Hub asynchronously. +#### Send a single event or an array of events ```python from azure.eventhub.aio import EventHubClient from azure.eventhub import EventData @@ -174,7 +205,37 @@ try: event_list.append(EventData(b"A single event")) async with producer: - await producer.send(event_list) + await producer.send(event_list) # Send a list of events + await producer.send(EventData(b"A single event")) # Send a single event +except: + raise +finally: + pass +``` + +#### Send a batch of events + +Use the `create_batch` method on `EventHubProducer` to create an `EventDataBatch` object which can then be sent using the `send` method. Events may be added to the `EventDataBatch` using the `try_add` method until the maximum batch size limit in bytes has been reached. 
+```python +from azure.eventhub.aio import EventHubClient +from azure.eventhub import EventData + +try: + connection_str = '<< CONNECTION STRING FOR THE EVENT HUBS NAMESPACE >>' + event_hub_path = '<< NAME OF THE EVENT HUB >>' + client = EventHubClient.from_connection_string(connection_str, event_hub_path) + producer = client.create_producer(partition_id="0") + + event_data_batch = await producer.create_batch(max_size=10000) + can_add = True + while can_add: + try: + event_data_batch.try_add(EventData('Message inside EventBatchData')) + except ValueError: + can_add = False # EventDataBatch object reaches max_size. + + async with producer: + await producer.send(event_data_batch) except: raise finally: @@ -213,9 +274,11 @@ Using an `EventHubConsumer` to consume events like in the previous examples puts The `EventProcessor` will delegate the processing of events to a `PartitionProcessor` that you provide, allowing you to focus on business logic while the processor holds responsibility for managing the underlying consumer operations including checkpointing and load balancing. +Load balancing is typically useful when running multiple instances of `EventProcessor` across multiple processes or even machines. It is recommended to store checkpoints to a persistent store when running in production. Search pypi with the prefix `azure-eventhubs-checkpoint` to find packages that support persistent storage of checkpoints. + You can see how to use the `EventProcessor` in the below example, where we use an in memory `PartitionManager` that does checkpointing in memory. -[Azure Blob Storage Partition Manager](https://github.com/Azure/azure-sdk-for-python/tree/master/sdk/eventhub/azure-eventhubs-checkpointstoreblob-aio) is another `PartitionManager` implementation that allows multiple EventProcessors to share the load balancing and checkpoint data in a central storage. 
+[Azure Blob Storage Partition Manager](https://github.com/Azure/azure-sdk-for-python/tree/master/sdk/eventhub/azure-eventhubs-checkpointstoreblob-aio) is one of the `PartitionManager` implementations we provide that applies Azure Blob Storage as the persistent store. ```python diff --git a/sdk/eventhub/azure-eventhubs/azure/eventhub/aio/eventprocessor/event_processor.py b/sdk/eventhub/azure-eventhubs/azure/eventhub/aio/eventprocessor/event_processor.py index ec65d0453d5c..18446249ff23 100644 --- a/sdk/eventhub/azure-eventhubs/azure/eventhub/aio/eventprocessor/event_processor.py +++ b/sdk/eventhub/azure-eventhubs/azure/eventhub/aio/eventprocessor/event_processor.py @@ -91,9 +91,9 @@ :param partition_processor_type: A subclass type of ~azure.eventhub.eventprocessor.PartitionProcessor. :type partition_processor_type: type :param partition_manager: Interacts with the data storage that stores ownership and checkpoints data. - ~azure.eventhub.aio.eventprocessor.SamplePartitionManager can be used to save data in memory or to a file. - More sophisticated partition managers are / will be provided as plug-ins. Users can also develop their own - partition managers. + ~azure.eventhub.aio.eventprocessor.SamplePartitionManager demonstrates the basic usage of `PartitionManager` + which stores data in memory or a file. + Users can either use the provided `PartitionManager` plug-ins or develop their own `PartitionManager`. :type partition_manager: Subclass of ~azure.eventhub.eventprocessor.PartitionManager. :param initial_event_position: The event position to start a partition consumer. if the partition has no checkpoint yet. This could be replaced by "reset" checkpoint in the near future. @@ -119,10 +119,10 @@ def __repr__(self): return 'EventProcessor: id {}'.format(self._id) async def start(self): - """Start the EventProcessor + """Start the EventProcessor. 
- This EventProcessor will then start to balance partition ownership with other EventProcessors - and asynchronously start to receive EventData from EventHub and process events. + The EventProcessor will try to claim and balance partition ownership with other `EventProcessor` + and asynchronously start receiving EventData from EventHub and processing events. :return: None @@ -155,13 +155,13 @@ async def start(self): await asyncio.sleep(self._polling_interval) async def stop(self): - """Stop the EventProcessor + """Stop the EventProcessor. - This EventProcessor will stop receiving events from EventHubs and release the ownership of the partitions + The EventProcessor will stop receiving events from EventHubs and release the ownership of the partitions it is working on. - If other EventProcessors are still working, they will take over these partitions. + Other running EventProcessor will take over these released partitions. - A stopped EventProcessor can be restarted by calling method start() again. + A stopped EventProcessor can be restarted by calling method `start` again. :return: None diff --git a/sdk/eventhub/azure-eventhubs/azure/eventhub/aio/eventprocessor/partition_processor.py b/sdk/eventhub/azure-eventhubs/azure/eventhub/aio/eventprocessor/partition_processor.py index 9481c032eb86..a16be38e6220 100644 --- a/sdk/eventhub/azure-eventhubs/azure/eventhub/aio/eventprocessor/partition_processor.py +++ b/sdk/eventhub/azure-eventhubs/azure/eventhub/aio/eventprocessor/partition_processor.py @@ -26,12 +26,14 @@ class PartitionProcessor(ABC): """ async def initialize(self, partition_context: PartitionContext): - """Called when EventProcessor creates this PartitionProcessor. + """This method will be called when `EventProcessor` creates a `PartitionProcessor`. :param partition_context: The context information of this partition. 
:type partition_context: ~azure.eventhub.aio.eventprocessor.PartitionContext """ + # Please put the code for initialization of PartitionProcessor here. + async def close(self, reason, partition_context: PartitionContext): """Called when EventProcessor stops processing this PartitionProcessor. @@ -46,6 +48,8 @@ async def close(self, reason, partition_context: PartitionContext): """ + # Please put the code for closing PartitionProcessor here. + @abstractmethod async def process_events(self, events: List[EventData], partition_context: PartitionContext): """Called when a batch of events have been received. @@ -58,6 +62,8 @@ async def process_events(self, events: List[EventData], partition_context: Parti """ + # Please put the code for processing events here. + async def process_error(self, error, partition_context: PartitionContext): """Called when an error happens when receiving or processing events @@ -68,3 +74,5 @@ async def process_error(self, error, partition_context: PartitionContext): :type partition_context: ~azure.eventhub.aio.eventprocessor.PartitionContext """ + + # Please put the code for processing error here. diff --git a/sdk/eventhub/azure-eventhubs/azure/eventhub/common.py b/sdk/eventhub/azure-eventhubs/azure/eventhub/common.py index 7a25fbdd7bac..a9656eb773d7 100644 --- a/sdk/eventhub/azure-eventhubs/azure/eventhub/common.py +++ b/sdk/eventhub/azure-eventhubs/azure/eventhub/common.py @@ -261,18 +261,14 @@ def encode_message(self): class EventDataBatch(object): """ - It's much faster to send EventData in a batch than individually. But putting too much EventData in one batch - may exceed the frame size limit of the event hub. - EventDataBatch helps you build the maximum allowed size batch of EventData to improve performance - within the size limit - - Use create_batch method of ~azure.eventhub.EventHubProducer or ~azure.eventhub.aio.EventHubProducer - to create an EventDataBatch object. It retrieves the frame size limit from the service. 
- Use method EventDataBatch.try_add to build the list until a ValueError is raised, - and use send method of ~azure.eventhub.EventHubProducer or ~azure.eventhub.aio.EventHubProducer - to send out the EventData batch to EventHub - - Do not instantiate an EventDataBatch object using its constructor. + Sending events in batches gets better performance than sending individual events. + EventDataBatch helps you create the maximum allowed size batch of `EventData` to improve sending performance. + + Use `try_add` method to add events until the maximum batch size limit in bytes has been reached - a `ValueError` will be raised. + Use `send` method of ~azure.eventhub.EventHubProducer or ~azure.eventhub.aio.EventHubProducer for sending. + + Please use the `create_batch` method of `EventHubProducer` + to create an `EventDataBatch` object instead of instantiating an `EventDataBatch` object directly. """ def __init__(self, max_size=None, partition_key=None): From d5c839be26ee385c8c8f24c81e9430921306cd12 Mon Sep 17 00:00:00 2001 From: yijxie Date: Tue, 10 Sep 2019 08:15:53 -0700 Subject: [PATCH 51/51] Fix a pylint error --- sdk/eventhub/azure-eventhubs/azure/eventhub/common.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/sdk/eventhub/azure-eventhubs/azure/eventhub/common.py b/sdk/eventhub/azure-eventhubs/azure/eventhub/common.py index a9656eb773d7..5923d7f57972 100644 --- a/sdk/eventhub/azure-eventhubs/azure/eventhub/common.py +++ b/sdk/eventhub/azure-eventhubs/azure/eventhub/common.py @@ -264,7 +264,8 @@ class EventDataBatch(object): Sending events in batches gets better performance than sending individual events. EventDataBatch helps you create the maximum allowed size batch of `EventData` to improve sending performance. - Use `try_add` method to add events until the maximum batch size limit in bytes has been reached - a `ValueError` will be raised. 
+ Use `try_add` method to add events until the maximum batch size limit in bytes has been reached - + a `ValueError` will be raised. Use `send` method of ~azure.eventhub.EventHubProducer or ~azure.eventhub.aio.EventHubProducer for sending. Please use the `create_batch` method of `EventHubProducer`