Merged
Commits
43 commits
3a32907
Draft EventProcessor Loadbalancing
Aug 22, 2019
39b1b86
EventProcessor Load balancing
Aug 22, 2019
17f5153
small changes from bryan's review
Aug 23, 2019
04ef548
remove checkpoint manager from initialize
Aug 23, 2019
9be1741
small changes
Aug 23, 2019
1b5753c
Draft EventProcessor Loadbalancing
Aug 22, 2019
b4b77f9
EventProcessor Load balancing
Aug 22, 2019
1787fdd
small changes from bryan's review
Aug 23, 2019
c2d0155
remove checkpoint manager from initialize
Aug 23, 2019
1074385
small changes
Aug 23, 2019
386baf0
Fix code review feedback
Aug 29, 2019
1afbf0c
Merge branch 'eventhubs_yx' of github.com:Azure/azure-sdk-for-python …
Aug 30, 2019
c126bea
Packaging update of azure-mgmt-datalake-analytics
AutorestCI Aug 30, 2019
40c7f03
Packaging update of azure-loganalytics
AutorestCI Aug 30, 2019
cf22c7c
Packaging update of azure-mgmt-storage
AutorestCI Aug 30, 2019
c7440b2
Merge branch 'eventhubs_preview3' into eventhubs_yx
Aug 30, 2019
fa804f4
code review fixes and pylint error
Aug 30, 2019
470cf7e
Merge branch 'eventhubs_yx' of github.com:Azure/azure-sdk-for-python …
Aug 30, 2019
e5f3b50
reduce dictionary access
Aug 30, 2019
8343876
Revert "Packaging update of azure-mgmt-storage"
Sep 2, 2019
66c5b31
Revert "Packaging update of azure-loganalytics"
Sep 2, 2019
bcd851a
Revert "Packaging update of azure-mgmt-datalake-analytics"
Sep 2, 2019
d740bb0
Trivial code change
Sep 2, 2019
aad6978
Refine exception handling for eventprocessor
Sep 3, 2019
a55dc13
Enable pylint for eventprocessor
Sep 3, 2019
a339985
Expose OwnershipLostError
Sep 3, 2019
9102713
Move eventprocessor to aio
Sep 4, 2019
278592c
change checkpoint_manager to partition context
Sep 4, 2019
665f28c
fix pylint error
Sep 4, 2019
0060f9d
fix a small issue
Sep 4, 2019
7b4273a
Catch list_ownership/claim_ownership exceptions and retry
Sep 5, 2019
bdf97c8
Fix code review issues
Sep 6, 2019
02a4daf
fix event processor long running test
Sep 6, 2019
a9446de
Remove utils.py
Sep 6, 2019
8dfdec9
Remove close() method
Sep 6, 2019
2aace82
Updated docstrings
Sep 6, 2019
36ba0a3
add pytest
Sep 7, 2019
7f95d9e
small fixes
Sep 7, 2019
f5870af
Merge branch 'eventhubs_preview3' into eventhubs_yx
Sep 7, 2019
f30d143
Revert "Remove utils.py"
Sep 7, 2019
893bee0
change asyncio.create_task to 3.5 friendly code
Sep 7, 2019
4b41fa5
Remove Callable
Sep 7, 2019
fef0551
raise CancelledError instead of break
Sep 7, 2019
pylintrc (4 changes: 2 additions & 2 deletions)
@@ -2,8 +2,8 @@
 ignore-patterns=test_*,conftest,setup
 reports=no

-# PYLINT DIRECTORY BLACKLIST. Ignore eventprocessor temporarily until new eventprocessor code is merged to master
-ignore=_generated,samples,examples,test,tests,doc,.tox,eventprocessor
+# PYLINT DIRECTORY BLACKLIST.
+ignore=_generated,samples,examples,test,tests,doc,.tox

 init-hook='import sys; sys.path.insert(0, os.path.abspath(os.getcwd().rsplit("azure-sdk-for-python", 1)[0] + "azure-sdk-for-python/scripts/pylint_custom_plugin"))'
 load-plugins=pylint_guidelines_checker
@@ -5,13 +5,16 @@

 from .event_processor import EventProcessor
 from .partition_processor import PartitionProcessor, CloseReason
-from .partition_manager import PartitionManager
-from .sqlite3_partition_manager import Sqlite3PartitionManager
+from .partition_manager import PartitionManager, OwnershipLostError
+from .partition_context import PartitionContext
+from .sample_partition_manager import SamplePartitionManager

 __all__ = [
     'CloseReason',
     'EventProcessor',
     'PartitionProcessor',
     'PartitionManager',
-    'Sqlite3PartitionManager',
-]
+    'OwnershipLostError',
+    'PartitionContext',
+    'SamplePartitionManager',
+]
@@ -0,0 +1,133 @@
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# -----------------------------------------------------------------------------------

import time
import random
import math
from typing import List
from collections import Counter, defaultdict
from azure.eventhub.aio import EventHubClient
from .partition_manager import PartitionManager


class OwnershipManager(object):
"""Increases or decreases the number of partitions owned by an EventProcessor
so the number of owned partitions are balanced among multiple EventProcessors

An EventProcessor calls claim_ownership() of this class every x seconds,
where x is set by keyword argument "polling_interval" in EventProcessor,
to claim the ownership of partitions, create tasks for the claimed ownership, and cancel tasks that no longer belong
to the claimed ownership.

"""
def __init__(
self, eventhub_client: EventHubClient, consumer_group_name: str, owner_id: str,
partition_manager: PartitionManager, ownership_timeout: float
):
self.cached_parition_ids = [] # type: List[str]
self.eventhub_client = eventhub_client
self.eventhub_name = eventhub_client.eh_name
self.consumer_group_name = consumer_group_name
self.owner_id = owner_id
self.partition_manager = partition_manager
self.ownership_timeout = ownership_timeout

async def claim_ownership(self):
Member
Missing type annotations...

Contributor Author
This is internal stuff, so I guess we don't need type annotations? The module name starts with _ even though this method doesn't.

"""Claims ownership for this EventProcessor
1. Retrieves all partition ids of an event hub from azure event hub service
2. Retrieves current ownership list via this EventProcessor's PartitionManager.
3. Balances number of ownership. Refer to _balance_ownership() for details.
4. Claims the ownership for the balanced number of partitions.

:return: List[Dict[Any]]
"""
if not self.cached_parition_ids:
await self._retrieve_partition_ids()
to_claim = await self._balance_ownership(self.cached_parition_ids)
claimed_list = await self.partition_manager.claim_ownership(to_claim) if to_claim else None
Member
What does returning None signify (as opposed to returning an empty list/tuple)?

Contributor Author
No difference between [] and None.
The point of this statement is to skip calling claim_ownership(to_claim) when to_claim is None or empty, not to return None rather than an empty list.

return claimed_list
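A minimal standalone sketch of the pattern discussed in the comment thread above (illustrative only, not code from this PR; claim_ownership_stub and demo are hypothetical names): the conditional expression exists to skip the claim call when there is nothing to claim, and downstream code treats None and an empty list the same.

import asyncio

async def claim_ownership_stub(to_claim):
    # stand-in for a partition manager's claim_ownership(); it simply echoes its input
    return to_claim

async def demo(to_claim):
    # skip the (potentially remote) claim call entirely when there is nothing to claim
    claimed = await claim_ownership_stub(to_claim) if to_claim else None
    # callers only iterate over the result, so None and [] behave identically here
    for ownership in claimed or []:
        print("claimed partition", ownership["partition_id"])

loop = asyncio.get_event_loop()
loop.run_until_complete(demo([]))                       # no claim call, nothing printed
loop.run_until_complete(demo([{"partition_id": "0"}]))  # prints: claimed partition 0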

async def _retrieve_partition_ids(self):
"""List all partition ids of the event hub that the EventProcessor is working on.

:return: List[str]
"""
self.cached_parition_ids = await self.eventhub_client.get_partition_ids()

async def _balance_ownership(self, all_partition_ids):
"""Balances and claims ownership of partitions for this EventProcessor.
The balancing algorithm is:
1. Find partitions with inactive ownership and partitions that haven never been claimed before
2. Find the number of active owners, including this EventProcessor, for all partitions.
3. Calculate the average count of partitions that an owner should own.
(number of partitions // number of active owners)
4. Calculate the largest allowed count of partitions that an owner can own.
math.ceil(number of partitions / number of active owners).
This should be equal or 1 greater than the average count
5. Adjust the number of partitions owned by this EventProcessor (owner)
a. if this EventProcessor owns more than largest allowed count, abandon one partition
b. if this EventProcessor owns less than average count, add one from the inactive or unclaimed partitions,
or steal one from another owner that has the largest number of ownership among all owners (EventProcessors)
c. Otherwise, no change to the ownership

The balancing algorithm adjust one partition at a time to gradually build the balanced ownership.
Ownership must be renewed to keep it active. So the returned result includes both existing ownership and
the newly adjusted ownership.
This method balances but doesn't claim ownership. The caller of this method tries to claim the result ownership
list. But it may not successfully claim all of them because of concurrency. Other EventProcessors may happen to
claim a partition at that time. Since balancing and claiming are run in infinite repeatedly,
it achieves balancing among all EventProcessors after some time of running.

:return: List[Dict[str, Any]], A list of ownership.
"""
ownership_list = await self.partition_manager.list_ownership(
Member
I would try to remove the ownership_list variable. I had to go back to figure out what the list included when it was referenced below, and the list is the same as ownership_dict.values(). So we basically have two names for the same set of items.

Contributor Author
I guess dict.values() creates a new list every time it's called so it's an O(n) operation.
We should avoid calling the same dict.values() if we can.

self.eventhub_name, self.consumer_group_name
)
now = time.time()
ownership_dict = {x["partition_id"]: x for x in ownership_list} # put the list to dict for fast lookup
not_owned_partition_ids = [pid for pid in all_partition_ids if pid not in ownership_dict]
timed_out_partition_ids = [ownership["partition_id"] for ownership in ownership_list
if ownership["last_modified_time"] + self.ownership_timeout < now]
claimable_partition_ids = not_owned_partition_ids + timed_out_partition_ids
active_ownership = [ownership for ownership in ownership_list
if ownership["last_modified_time"] + self.ownership_timeout >= now]
active_ownership_by_owner = defaultdict(list)
for ownership in active_ownership:
active_ownership_by_owner[ownership["owner_id"]].append(ownership)
active_ownership_self = active_ownership_by_owner[self.owner_id]

# calculate expected count per owner
all_partition_count = len(all_partition_ids)
# owners_count is the number of active owners. If self.owner_id is not yet among the active owners,
# then plus 1 to include self. This will make owners_count >= 1.
owners_count = len(active_ownership_by_owner) + \
Member
This needs a comment to describe what it is doing.

Contributor Author
Added comment

(0 if self.owner_id in active_ownership_by_owner else 1)
expected_count_per_owner = all_partition_count // owners_count
most_count_allowed_per_owner = math.ceil(all_partition_count / owners_count)
# end of calculating expected count per owner

to_claim = active_ownership_self
if len(active_ownership_self) > most_count_allowed_per_owner: # needs to abandon a partition
to_claim.pop() # abandon one partition if owned too many
elif len(active_ownership_self) < expected_count_per_owner:
# Either claims an inactive partition, or steals from other owners
if claimable_partition_ids: # claim an inactive partition if there is
random_partition_id = random.choice(claimable_partition_ids)
random_chosen_to_claim = ownership_dict.get(random_partition_id,
{"partition_id": random_partition_id,
"eventhub_name": self.eventhub_name,
"consumer_group_name": self.consumer_group_name
})
random_chosen_to_claim["owner_id"] = self.owner_id
to_claim.append(random_chosen_to_claim)
else: # steal from another owner that has the most count
active_ownership_count_group_by_owner = Counter(
dict((x, len(y)) for x, y in active_ownership_by_owner.items()))
most_frequent_owner_id = active_ownership_count_group_by_owner.most_common(1)[0][0]
# randomly choose a partition to steal from the most_frequent_owner
to_steal_partition = random.choice(active_ownership_by_owner[most_frequent_owner_id])
to_steal_partition["owner_id"] = self.owner_id
to_claim.append(to_steal_partition)
return to_claim
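To make the arithmetic from the _balance_ownership docstring concrete, here is a small self-contained sketch (illustrative only, not code from this PR; balance_decision and the sample ownership dict are hypothetical). With 8 partitions and 3 active owners, expected_count_per_owner is 8 // 3 = 2 and most_count_allowed_per_owner is math.ceil(8 / 3) = 3, so an owner holding 4 partitions abandons one, an owner holding 1 claims or steals one, and an owner holding 3 keeps what it has.

import math

def balance_decision(all_partition_count, active_ownership_by_owner, owner_id):
    # number of active owners; count this owner even if it holds nothing yet
    owners_count = len(active_ownership_by_owner) + (
        0 if owner_id in active_ownership_by_owner else 1)
    expected_count_per_owner = all_partition_count // owners_count
    most_count_allowed_per_owner = math.ceil(all_partition_count / owners_count)
    owned_count = len(active_ownership_by_owner.get(owner_id, []))
    if owned_count > most_count_allowed_per_owner:
        return "abandon one partition"
    if owned_count < expected_count_per_owner:
        return "claim an unclaimed/expired partition, or steal one from the largest owner"
    return "keep current ownership"

# 8 partitions spread unevenly across 3 active owners
sample_ownership = {"a": ["0", "1", "2", "3"], "b": ["4", "5", "6"], "c": ["7"]}
print(balance_decision(8, sample_ownership, "a"))  # abandon one partition (4 > 3)
print(balance_decision(8, sample_ownership, "c"))  # claim or steal one (1 < 2)
print(balance_decision(8, sample_ownership, "b"))  # keep current ownership (2 <= 3 <= 3)

Each call adjusts at most one partition for a given owner, matching the one-partition-at-a-time behavior described in the docstring.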